Posted to commits@kibble.apache.org by tu...@apache.org on 2020/10/24 23:43:39 UTC

[kibble] branch master updated: Add Black to pre-commit (#66)

This is an automated email from the ASF dual-hosted git repository.

turbaszek pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kibble.git


The following commit(s) were added to refs/heads/master by this push:
     new d7f9031  Add Black to pre-commit (#66)
d7f9031 is described below

commit d7f9031dfd93a2efd676fcbd59443feec01df6ed
Author: Michał Słowikowski <mi...@polidea.com>
AuthorDate: Sun Oct 25 01:43:30 2020 +0200

    Add Black to pre-commit (#66)
    
    Add Black to pre-commit and format whole codebase
---
 .pre-commit-config.yaml                    |   6 +
 docs/source/conf.py                        |  64 +++---
 kibble/__main__.py                         |   3 +-
 kibble/api/handler.py                      |  88 ++++----
 kibble/api/pages/__init__.py               |   8 +-
 kibble/api/pages/account.py                | 114 ++++++----
 kibble/api/pages/bio/bio.py                | 153 ++++++-------
 kibble/api/pages/bio/newtimers.py          | 285 +++++++++---------------
 kibble/api/pages/bio/trends.py             | 258 +++++++---------------
 kibble/api/pages/ci/queue.py               | 164 ++++++--------
 kibble/api/pages/ci/status.py              | 133 +++++------
 kibble/api/pages/ci/top-buildcount.py      | 128 +++++------
 kibble/api/pages/ci/top-buildtime.py       | 132 +++++------
 kibble/api/pages/code/changes.py           | 154 ++++++-------
 kibble/api/pages/code/commits.py           | 135 +++++-------
 kibble/api/pages/code/committers.py        | 245 ++++++++-------------
 kibble/api/pages/code/evolution.py         | 109 ++++-----
 kibble/api/pages/code/pony-timeseries.py   | 129 +++++------
 kibble/api/pages/code/pony.py              | 220 +++++++------------
 kibble/api/pages/code/punchcard.py         | 132 +++++------
 kibble/api/pages/code/relationships.py     | 217 +++++++++---------
 kibble/api/pages/code/retention.py         | 144 +++++-------
 kibble/api/pages/code/sloc.py              |  85 +++-----
 kibble/api/pages/code/top-commits.py       | 113 ++++------
 kibble/api/pages/code/top-sloc.py          |  73 +++----
 kibble/api/pages/code/trends.py            | 323 ++++++++++-----------------
 kibble/api/pages/filters.py                |  39 ++--
 kibble/api/pages/forum/actors.py           | 196 +++++++----------
 kibble/api/pages/forum/creators.py         | 126 +++++------
 kibble/api/pages/forum/issues.py           | 210 ++++++++----------
 kibble/api/pages/forum/responders.py       | 127 +++++------
 kibble/api/pages/forum/top-count.py        | 102 ++++-----
 kibble/api/pages/forum/top.py              | 102 ++++-----
 kibble/api/pages/forum/trends.py           | 306 +++++++++-----------------
 kibble/api/pages/issue/actors.py           | 204 +++++++----------
 kibble/api/pages/issue/age.py              |  97 ++++----
 kibble/api/pages/issue/closers.py          | 129 +++++------
 kibble/api/pages/issue/issues.py           | 211 ++++++++----------
 kibble/api/pages/issue/openers.py          | 128 +++++------
 kibble/api/pages/issue/pony-timeseries.py  | 132 +++++------
 kibble/api/pages/issue/relationships.py    | 222 +++++++++----------
 kibble/api/pages/issue/retention.py        | 148 ++++++-------
 kibble/api/pages/issue/top-count.py        | 106 ++++-----
 kibble/api/pages/issue/top.py              | 107 ++++-----
 kibble/api/pages/issue/trends.py           | 330 ++++++++++------------------
 kibble/api/pages/mail/keyphrases.py        |  93 +++-----
 kibble/api/pages/mail/map.py               | 238 ++++++++++----------
 kibble/api/pages/mail/mood-timeseries.py   | 126 ++++-------
 kibble/api/pages/mail/mood.py              | 163 ++++++--------
 kibble/api/pages/mail/pony-timeseries.py   | 121 ++++------
 kibble/api/pages/mail/relationships.py     | 192 ++++++++--------
 kibble/api/pages/mail/retention.py         | 136 +++++-------
 kibble/api/pages/mail/timeseries-single.py | 105 ++++-----
 kibble/api/pages/mail/timeseries.py        | 155 ++++++-------
 kibble/api/pages/mail/top-authors.py       | 115 ++++------
 kibble/api/pages/mail/top-topics.py        |  95 ++++----
 kibble/api/pages/mail/trends.py            | 340 +++++++++++++----------------
 kibble/api/pages/org/contributors.py       | 124 +++++------
 kibble/api/pages/org/list.py               |  83 ++++---
 kibble/api/pages/org/members.py            | 194 +++++++++-------
 kibble/api/pages/org/sourcetypes.py        |   4 -
 kibble/api/pages/org/trends.py             | 155 +++++--------
 kibble/api/pages/session.py                |  96 ++++----
 kibble/api/pages/sources.py                | 174 ++++++++-------
 kibble/api/pages/verify.py                 |  30 ++-
 kibble/api/pages/views.py                  | 236 ++++++++++----------
 kibble/api/pages/widgets.py                |  10 +-
 kibble/api/plugins/database.py             | 132 ++++++-----
 kibble/api/plugins/openapi.py              | 247 +++++++++++++--------
 kibble/api/plugins/session.py              | 183 +++++++++-------
 kibble/api/yaml/openapi/combine.py         |  48 ++--
 kibble/settings.py                         |   4 +-
 kibble/setup/makeaccount.py                |  76 ++++---
 kibble/setup/setup.py                      | 239 ++++++++++----------
 setup.py                                   |  75 +++----
 75 files changed, 4564 insertions(+), 6062 deletions(-)

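Nearly all of the churn below is mechanical Black output rather than hand edits: string quotes are normalized to double quotes, lines longer than Black's default 88 columns are wrapped inside their brackets, and exploded multi-line literals gain trailing commas (see e.g. the errHeaders dict in handler.py). For example, from kibble/api/handler.py further down:

    # before
    start_response('400 Invalid request', [
               ('Content-Type', 'application/json')])

    # after Black
    start_response(
        "400 Invalid request", [("Content-Type", "application/json")]
    )
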
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 0bb7740..889d623 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -68,3 +68,9 @@ repos:
           - --license-filepath
           - license-templates/LICENSE.txt
           - --fuzzy-match-generates-todo
+  - repo: https://github.com/psf/black
+    rev: 19.3b0
+    hooks:
+    - id: black
+      name: Black
+      types: [python]
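With this hook added, Black runs automatically against staged Python files on every commit. For reference, the standard pre-commit workflow to reproduce a whole-tree run like this commit's is:

    pip install pre-commit
    pre-commit install                  # install the git hook once per clone
    pre-commit run black --all-files    # format the entire codebase

Pinning rev: 19.3b0 keeps the formatting reproducible across contributors, since Black's output can change between releases.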
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 9f49f27..35f0f22 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -46,34 +46,33 @@
 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
-extensions = ['sphinx.ext.todo',
-    'sphinx.ext.imgmath']
+extensions = ["sphinx.ext.todo", "sphinx.ext.imgmath"]
 
 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
 
 # The suffix(es) of source filenames.
 # You can specify multiple suffixes as a list of strings:
 #
 # source_suffix = ['.rst', '.md']
-source_suffix = '.rst'
+source_suffix = ".rst"
 
 # The master toctree document.
-master_doc = 'index'
+master_doc = "index"
 
 # General information about the project.
-project = u'Apache Kibble'
-copyright = u'2018, The Apache Kibble Community'
-author = u'The Apache Kibble Community'
+project = u"Apache Kibble"
+copyright = u"2018, The Apache Kibble Community"
+author = u"The Apache Kibble Community"
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
 # built documents.
 #
 # The short X.Y version.
-version = u'0.1'
+version = u"0.1"
 # The full version, including alpha/beta/rc tags.
-release = u'0.1'
+release = u"0.1"
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
@@ -88,7 +87,7 @@ language = None
 exclude_patterns = []
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = "sphinx"
 
 # If true, `todo` and `todoList` produce output, else they produce nothing.
 todo_include_todos = True
@@ -99,8 +98,8 @@ todo_include_todos = True
 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
 #
-html_theme = 'sphinx_rtd_theme'
-html_logo = '_static/images/kibble-logo.png'
+html_theme = "sphinx_rtd_theme"
+html_logo = "_static/images/kibble-logo.png"
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further.  For a list of options available for each theme, see the
@@ -111,7 +110,7 @@ html_logo = '_static/images/kibble-logo.png'
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+html_static_path = ["_static"]
 
 # Custom sidebar templates, must be a dictionary that maps document names
 # to template names.
@@ -119,9 +118,9 @@ html_static_path = ['_static']
 # This is required for the alabaster theme
 # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
 html_sidebars = {
-    '**': [
-        'relations.html',  # needs 'show_related': True theme option to display
-        'searchbox.html',
+    "**": [
+        "relations.html",  # needs 'show_related': True theme option to display
+        "searchbox.html",
     ]
 }
 
@@ -129,7 +128,7 @@ html_sidebars = {
 # -- Options for HTMLHelp output ------------------------------------------
 
 # Output file base name for HTML help builder.
-htmlhelp_basename = 'ApacheKibbledoc'
+htmlhelp_basename = "ApacheKibbledoc"
 
 
 # -- Options for LaTeX output ---------------------------------------------
@@ -138,15 +137,12 @@ latex_elements = {
     # The paper size ('letterpaper' or 'a4paper').
     #
     # 'papersize': 'letterpaper',
-
     # The font size ('10pt', '11pt' or '12pt').
     #
     # 'pointsize': '10pt',
-
     # Additional stuff for the LaTeX preamble.
     #
     # 'preamble': '',
-
     # LaTeX figure (float) alignment
     #
     # 'figure_align': 'htbp',
@@ -156,8 +152,13 @@ latex_elements = {
 # (source start file, target name, title,
 #  author, documentclass [howto, manual, or own class]).
 latex_documents = [
-    (master_doc, 'ApacheKibble.tex', u'Apache Kibble Documentation',
-     u'The Apache Kibble Community', 'manual'),
+    (
+        master_doc,
+        "ApacheKibble.tex",
+        u"Apache Kibble Documentation",
+        u"The Apache Kibble Community",
+        "manual",
+    )
 ]
 
 
@@ -165,10 +166,7 @@ latex_documents = [
 
 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
-man_pages = [
-    (master_doc, 'apachekibble', u'Apache Kibble Documentation',
-     [author], 1)
-]
+man_pages = [(master_doc, "apachekibble", u"Apache Kibble Documentation", [author], 1)]
 
 
 # -- Options for Texinfo output -------------------------------------------
@@ -177,7 +175,13 @@ man_pages = [
 # (source start file, target name, title, author,
 #  dir menu entry, description, category)
 texinfo_documents = [
-    (master_doc, 'ApacheKibble', u'Apache Kibble Documentation',
-     author, 'ApacheKibble', 'One line description of project.',
-     'Miscellaneous'),
+    (
+        master_doc,
+        "ApacheKibble",
+        u"Apache Kibble Documentation",
+        author,
+        "ApacheKibble",
+        "One line description of project.",
+        "Miscellaneous",
+    )
 ]
diff --git a/kibble/__main__.py b/kibble/__main__.py
index c7a18f7..aea89fd 100644
--- a/kibble/__main__.py
+++ b/kibble/__main__.py
@@ -15,9 +15,10 @@
 # specific language governing permissions and limitations
 # under the License.
 
+
 def main():
     print("Hello to kibble!")
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/kibble/api/handler.py b/kibble/api/handler.py
index 508a7c0..63d2489 100644
--- a/kibble/api/handler.py
+++ b/kibble/api/handler.py
@@ -1,5 +1,3 @@
-
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -40,8 +38,9 @@ from kibble.api.plugins.session import KibbleSession
 from kibble.settings import KIBBLE_YAML, YAML_DIRECTORY
 
 urls = []
-if __name__ != '__main__':
+if __name__ != "__main__":
     from kibble.api.pages import handlers
+
     for page, handler in handlers.items():
         urls.append((r"^(/api/%s)(/.+)?$" % page, handler.run))
 
@@ -68,6 +67,7 @@ class KibbleAPIWrapper:
     """
     Middleware wrapper for exceptions in the application
     """
+
     def __init__(self, path, func):
         self.func = func
         self.API = KibbleOpenAPI
@@ -79,33 +79,29 @@ class KibbleAPIWrapper:
         try:
             # Read JSON client data if any
             try:
-                request_size = int(environ.get('CONTENT_LENGTH', 0))
+                request_size = int(environ.get("CONTENT_LENGTH", 0))
             except (ValueError):
                 request_size = 0
-            requestBody = environ['wsgi.input'].read(request_size)
+            requestBody = environ["wsgi.input"].read(request_size)
             formdata = {}
             if requestBody and len(requestBody) > 0:
                 try:
-                    formdata = json.loads(requestBody.decode('utf-8'))
+                    formdata = json.loads(requestBody.decode("utf-8"))
                 except json.JSONDecodeError as err:
-                    start_response('400 Invalid request', [
-                               ('Content-Type', 'application/json')])
-                    yield json.dumps({
-                        "code": 400,
-                        "reason": "Invalid JSON: %s" % err
-                    })
+                    start_response(
+                        "400 Invalid request", [("Content-Type", "application/json")]
+                    )
+                    yield json.dumps({"code": 400, "reason": "Invalid JSON: %s" % err})
                     return
 
             # Validate URL against OpenAPI specs
             try:
-                self.API.validate(environ['REQUEST_METHOD'], self.path, formdata)
+                self.API.validate(environ["REQUEST_METHOD"], self.path, formdata)
             except openapi.OpenAPIException as err:
-                start_response('400 Invalid request', [
-                            ('Content-Type', 'application/json')])
-                yield json.dumps({
-                    "code": 400,
-                    "reason": err.message
-                })
+                start_response(
+                    "400 Invalid request", [("Content-Type", "application/json")]
+                )
+                yield json.dumps({"code": 400, "reason": err.message})
                 return
 
             # Call page with env, SR and form data
@@ -116,45 +112,41 @@ class KibbleAPIWrapper:
                         yield bucket
             except KibbleHTTPError as err:
                 errHeaders = {
-                    403: '403 Authentication failed',
-                    404: '404 Resource not found',
-                    500: '500 Internal Server Error',
-                    501: '501 Gateway error'
+                    403: "403 Authentication failed",
+                    404: "404 Resource not found",
+                    500: "500 Internal Server Error",
+                    501: "501 Gateway error",
                 }
-                errHeader = errHeaders[err.code] if err.code in errHeaders else "400 Bad request"
-                start_response(errHeader, [
-                            ('Content-Type', 'application/json')])
-                yield json.dumps({
-                    "code": err.code,
-                    "reason": err.message
-                }, indent = 4) + "\n"
+                errHeader = (
+                    errHeaders[err.code]
+                    if err.code in errHeaders
+                    else "400 Bad request"
+                )
+                start_response(errHeader, [("Content-Type", "application/json")])
+                yield json.dumps(
+                    {"code": err.code, "reason": err.message}, indent=4
+                ) + "\n"
                 return
 
         except:
             err_type, err_value, tb = sys.exc_info()
-            traceback_output = ['API traceback:']
+            traceback_output = ["API traceback:"]
             traceback_output += traceback.format_tb(tb)
-            traceback_output.append('%s: %s' % (err_type.__name__, err_value))
+            traceback_output.append("%s: %s" % (err_type.__name__, err_value))
             # We don't know if response has been given yet, try giving one, fail gracefully.
             try:
-                start_response('500 Internal Server Error', [
-                               ('Content-Type', 'application/json')])
+                start_response(
+                    "500 Internal Server Error", [("Content-Type", "application/json")]
+                )
             except:
                 pass
-            yield json.dumps({
-                "code": "500",
-                "reason": '\n'.join(traceback_output)
-            })
+            yield json.dumps({"code": "500", "reason": "\n".join(traceback_output)})
 
 
 def fourohfour(environ, start_response):
     """A very simple 404 handler"""
-    start_response("404 Not Found", [
-                ('Content-Type', 'application/json')])
-    yield json.dumps({
-        "code": 404,
-        "reason": "API endpoint not found"
-    }, indent = 4) + "\n"
+    start_response("404 Not Found", [("Content-Type", "application/json")])
+    yield json.dumps({"code": 404, "reason": "API endpoint not found"}, indent=4) + "\n"
     return
 
 
@@ -165,7 +157,7 @@ def application(environ, start_response):
     it and returns the output.
     """
     db = KibbleDatabase(config)
-    path = environ.get('PATH_INFO', '')
+    path = environ.get("PATH_INFO", "")
     for regex, function in urls:
         m = re.match(regex, path)
         if m:
@@ -182,14 +174,14 @@ def application(environ, start_response):
                 a += 1
                 # WSGI prefers byte strings, so convert if regular py3 string
                 if isinstance(bucket, str):
-                    yield bytes(bucket, encoding = 'utf-8')
+                    yield bytes(bucket, encoding="utf-8")
                 elif isinstance(bucket, bytes):
                     yield bucket
             return
 
     for bucket in fourohfour(environ, start_response):
-        yield bytes(bucket, encoding = 'utf-8')
+        yield bytes(bucket, encoding="utf-8")
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     KibbleOpenAPI.toHTML()
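The reflowed handler code above all follows the standard WSGI generator pattern: call start_response(status, headers) exactly once, then yield the response body as bytes. A minimal sketch of that contract (illustrative, not part of the patch):

    import json

    def application(environ, start_response):
        # status line and header list go to start_response; body is yielded as bytes
        start_response("200 OK", [("Content-Type", "application/json")])
        yield json.dumps({"okay": True}).encode("utf-8")
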
diff --git a/kibble/api/pages/__init__.py b/kibble/api/pages/__init__.py
index a9b1b9d..76f1235 100644
--- a/kibble/api/pages/__init__.py
+++ b/kibble/api/pages/__init__.py
@@ -24,6 +24,7 @@ Kibble API scripts library:
 
 import importlib
 import os
+
 # Define all the submodules we have
 
 rootpath = os.path.join(os.path.dirname(os.path.realpath(__file__)))
@@ -32,6 +33,7 @@ print("Reading pages from %s" % rootpath)
 # Import each submodule into a hash called 'handlers'
 handlers = {}
 
+
 def loadPage(path):
     for el in os.listdir(path):
         filepath = os.path.join(path, el)
@@ -39,8 +41,10 @@ def loadPage(path):
             if os.path.isdir(filepath):
                 loadPage(filepath)
             else:
-                p = filepath.replace(rootpath, "")[1:].replace('/', '.')[:-3]
-                xp = p.replace('.', '/')
+                p = filepath.replace(rootpath, "")[1:].replace("/", ".")[:-3]
+                xp = p.replace(".", "/")
                 print("Loading endpoint pages.%s as %s" % (p, xp))
                 handlers[xp] = importlib.import_module(f"kibble.api.pages.{p}")
+
+
 loadPage(rootpath)
diff --git a/kibble/api/pages/account.py b/kibble/api/pages/account.py
index 8821eee..196d06a 100644
--- a/kibble/api/pages/account.py
+++ b/kibble/api/pages/account.py
@@ -106,44 +106,55 @@ import email.message
 
 def sendCode(session, addr, code):
     msg = email.message.EmailMessage()
-    msg['To'] = addr
-    msg['From'] = session.config['mail']['sender']
-    msg['Subject'] = "Please verify your account"
-    msg.set_content("""\
+    msg["To"] = addr
+    msg["From"] = session.config["mail"]["sender"]
+    msg["Subject"] = "Please verify your account"
+    msg.set_content(
+        """\
 Hi there!
 Please verify your account by visiting:
 %s/api/verify/%s/%s
 
 With regards,
 Apache Kibble.
-""" % (session.url, addr, code)
+"""
+        % (session.url, addr, code)
+    )
+    s = smtplib.SMTP(
+        "%s:%s"
+        % (session.config["mail"]["mailhost"], session.config["mail"]["mailport"])
     )
-    s = smtplib.SMTP("%s:%s" % (session.config['mail']['mailhost'], session.config['mail']['mailport']))
     s.send_message(msg)
     s.quit()
 
+
 def run(API, environ, indata, session):
 
-    method = environ['REQUEST_METHOD']
+    method = environ["REQUEST_METHOD"]
 
     # Add a new account??
     if method == "PUT":
-        u = indata['email']
-        p = indata['password']
-        d = indata['displayname']
+        u = indata["email"]
+        p = indata["password"]
+        d = indata["displayname"]
 
         # Are new accounts allowed? (admin can always make accounts, of course)
-        if not session.config['accounts'].get('allowSignup', False):
-            if not (session.user and session.user['level'] == 'admin'):
-                raise API.exception(403, "New account requests have been administratively disabled.")
+        if not session.config["accounts"].get("allowSignup", False):
+            if not (session.user and session.user["level"] == "admin"):
+                raise API.exception(
+                    403, "New account requests have been administratively disabled."
+                )
 
         # Check if we already have that username in use
-        if session.DB.ES.exists(index=session.DB.dbname, doc_type='useraccount', id = u):
+        if session.DB.ES.exists(index=session.DB.dbname, doc_type="useraccount", id=u):
             raise API.exception(403, "Username already in use")
 
         # We require a username, displayName and password of at least 3 chars each
         if len(p) < 3 or len(u) < 3 or len(d) < 3:
-            raise API.exception(400, "Username, display-name and password must each be at least 3 characters long.")
+            raise API.exception(
+                400,
+                "Username, display-name and password must each be at elast 3 characters long.",
+            )
 
         # We loosely check that the email is an email
         if not re.match(r"^\S+@\S+\.\S+$", u):
@@ -151,7 +162,7 @@ def run(API, environ, indata, session):
 
         # Okay, let's make an account...I guess
         salt = bcrypt.gensalt()
-        pwd = bcrypt.hashpw(p.encode('utf-8'), salt).decode('ascii')
+        pwd = bcrypt.hashpw(p.encode("utf-8"), salt).decode("ascii")
 
         # Verification code, if needed
         vsalt = bcrypt.gensalt()
@@ -161,32 +172,33 @@ def run(API, environ, indata, session):
         # This is so previously unverified accounts don't have to verify
         # if we later turn verification on.
         verified = True
-        if session.config['accounts'].get('verify'):
+        if session.config["accounts"].get("verify"):
             verified = False
-            sendCode(session, u, vcode) # Send verification email
+            sendCode(session, u, vcode)  # Send verification email
             # If verification email fails, skip account creation.
 
         doc = {
-            'email': u,                         # Username (email)
-            'password': pwd,                    # Hashed password
-            'displayName': d,                   # Display Name
-            'organisations': [],                # Orgs user belongs to (default is none)
-            'ownerships': [],                   # Orgs user owns (default is none)
-            'defaultOrganisation': None,        # Default org for user
-            'verified': verified,               # Account verified via email?
-            'vcode': vcode,                     # Verification code
-            'userlevel': "user"                 # User level (user/admin)
+            "email": u,  # Username (email)
+            "password": pwd,  # Hashed password
+            "displayName": d,  # Display Name
+            "organisations": [],  # Orgs user belongs to (default is none)
+            "ownerships": [],  # Orgs user owns (default is none)
+            "defaultOrganisation": None,  # Default org for user
+            "verified": verified,  # Account verified via email?
+            "vcode": vcode,  # Verification code
+            "userlevel": "user",  # User level (user/admin)
         }
 
-
         # If we have auto-invite on, check if there are orgs to invite to
-        if 'autoInvite' in session.config['accounts']:
-            dom = u.split('@')[-1].lower()
-            for ai in session.config['accounts']['autoInvite']:
-                if ai['domain'] == dom:
-                    doc['organisations'].append(ai['organisation'])
-
-        session.DB.ES.index(index=session.DB.dbname, doc_type='useraccount', id = u, body = doc)
+        if "autoInvite" in session.config["accounts"]:
+            dom = u.split("@")[-1].lower()
+            for ai in session.config["accounts"]["autoInvite"]:
+                if ai["domain"] == dom:
+                    doc["organisations"].append(ai["organisation"])
+
+        session.DB.ES.index(
+            index=session.DB.dbname, doc_type="useraccount", id=u, body=doc
+        )
         yield json.dumps({"message": "Account created!", "verified": verified})
         return
 
@@ -194,24 +206,30 @@ def run(API, environ, indata, session):
     if not session.user:
         raise API.exception(403, "You must be logged in to use this API endpoint!")
 
-
     # Patch (edit) an account
     if method == "PATCH":
-        userid = session.user['email']
-        if indata.get('email') and session.user['userlevel'] == "admin":
-            userid = indata.get('email')
-        doc = session.DB.ES.get(index=session.DB.dbname, doc_type='useraccount', id = userid)
-        udoc = doc['_source']
-        if indata.get('defaultOrganisation'):
+        userid = session.user["email"]
+        if indata.get("email") and session.user["userlevel"] == "admin":
+            userid = indata.get("email")
+        doc = session.DB.ES.get(
+            index=session.DB.dbname, doc_type="useraccount", id=userid
+        )
+        udoc = doc["_source"]
+        if indata.get("defaultOrganisation"):
             # Make sure user is a member or admin here..
-            if session.user['userlevel'] == "admin" or indata.get('defaultOrganisation') in udoc['organisations']:
-                udoc['defaultOrganisation'] = indata.get('defaultOrganisation')
+            if (
+                session.user["userlevel"] == "admin"
+                or indata.get("defaultOrganisation") in udoc["organisations"]
+            ):
+                udoc["defaultOrganisation"] = indata.get("defaultOrganisation")
         # Changing password?
-        if indata.get('password'):
-            p = indata.get('password')
+        if indata.get("password"):
+            p = indata.get("password")
             salt = bcrypt.gensalt()
-            pwd = bcrypt.hashpw(p.encode('utf-8'), salt).decode('ascii')
+            pwd = bcrypt.hashpw(p.encode("utf-8"), salt).decode("ascii")
         # Update user doc
-        session.DB.ES.index(index=session.DB.dbname, doc_type='useraccount', id = userid, body = udoc)
+        session.DB.ES.index(
+            index=session.DB.dbname, doc_type="useraccount", id=userid, body=udoc
+        )
         yield json.dumps({"message": "Account updated!"})
         return
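The account code above hashes passwords with bcrypt before indexing them. A minimal sketch of the same hash-and-verify round trip (the verify half is not shown in this file; bcrypt.checkpw is the standard counterpart):

    import bcrypt

    # Hashing, as in account.py above: the salt is embedded in the resulting hash
    pwd = bcrypt.hashpw("secret".encode("utf-8"), bcrypt.gensalt()).decode("ascii")

    # Verification: bcrypt re-derives the hash using the salt stored in pwd
    assert bcrypt.checkpw("secret".encode("utf-8"), pwd.encode("ascii"))
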
diff --git a/kibble/api/pages/bio/bio.py b/kibble/api/pages/bio/bio.py
index c2c91cb..32f62c4 100644
--- a/kibble/api/pages/bio/bio.py
+++ b/kibble/api/pages/bio/bio.py
@@ -61,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the contributor trends renderer for Kibble
 """
@@ -72,6 +69,7 @@ import json
 import time
 import hashlib
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -82,116 +80,101 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
 
+    dOrg = session.user["defaultOrganisation"] or "apache"
 
-    dOrg = session.user['defaultOrganisation'] or "apache"
-
-    pid = hashlib.sha1( ("%s%s" % (dOrg, indata.get('email', '???'))).encode('ascii', errors='replace')).hexdigest()
+    pid = hashlib.sha1(
+        ("%s%s" % (dOrg, indata.get("email", "???"))).encode("ascii", errors="replace")
+    ).hexdigest()
     person = {}
-    if session.DB.ES.exists(index=session.DB.dbname, doc_type="person", id = pid):
-        person = session.DB.ES.get(index=session.DB.dbname, doc_type="person", id = pid)['_source']
+    if session.DB.ES.exists(index=session.DB.dbname, doc_type="person", id=pid):
+        person = session.DB.ES.get(index=session.DB.dbname, doc_type="person", id=pid)[
+            "_source"
+        ]
     else:
         raise API.exception(404, "No such biography!")
 
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                },
-                'size': 1,
-                'sort': [{ 'ts': 'asc' }]
-            }
+        "query": {"bool": {"must": [{"term": {"organisation": dOrg}}]}},
+        "size": 1,
+        "sort": [{"ts": "asc"}],
+    }
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-    if indata.get('email'):
-        codeKey = 'committer_email'
-        query['query']['bool']['should'] = [
-            {'term': {'issueCreator': indata.get('email')}},
-            {'term': {'issueCloser': indata.get('email')}},
-            {'term': {'sender': indata.get('email')}},
-            {'term': {codeKey: indata.get('email')}},
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
+    if indata.get("email"):
+        codeKey = "committer_email"
+        query["query"]["bool"]["should"] = [
+            {"term": {"issueCreator": indata.get("email")}},
+            {"term": {"issueCloser": indata.get("email")}},
+            {"term": {"sender": indata.get("email")}},
+            {"term": {codeKey: indata.get("email")}},
         ]
-        query['query']['bool']['minimum_should_match'] = 1
-
+        query["query"]["bool"]["minimum_should_match"] = 1
 
     # FIRST EMAIL
-    res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="email",
-            body = query
-        )
+    res = session.DB.ES.search(index=session.DB.dbname, doc_type="email", body=query)
     firstEmail = None
-    if res['hits']['hits']:
-        firstEmail = res['hits']['hits'][0]['_source']['ts']
+    if res["hits"]["hits"]:
+        firstEmail = res["hits"]["hits"][0]["_source"]["ts"]
 
     # FIRST COMMIT
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="code_commit",
-            body = query
-        )
+        index=session.DB.dbname, doc_type="code_commit", body=query
+    )
     firstCommit = None
-    if res['hits']['hits']:
-        firstCommit = res['hits']['hits'][0]['_source']['ts']
+    if res["hits"]["hits"]:
+        firstCommit = res["hits"]["hits"][0]["_source"]["ts"]
 
     # FIRST AUTHORSHIP
-    query['query']['bool']['should'][3] = {'term': {'author_email': indata.get('email')}}
+    query["query"]["bool"]["should"][3] = {
+        "term": {"author_email": indata.get("email")}
+    }
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="code_commit",
-            body = query
-        )
+        index=session.DB.dbname, doc_type="code_commit", body=query
+    )
     firstAuthor = None
-    if res['hits']['hits']:
-        firstAuthor = res['hits']['hits'][0]['_source']['ts']
-
+    if res["hits"]["hits"]:
+        firstAuthor = res["hits"]["hits"][0]["_source"]["ts"]
 
     # COUNT EMAIL, CODE, LINES CHANGED
-    del query['sort']
-    del query['size']
+    del query["sort"]
+    del query["size"]
     no_emails = session.DB.ES.count(
-            index=session.DB.dbname,
-            doc_type="email",
-            body = query
-        )['count']
+        index=session.DB.dbname, doc_type="email", body=query
+    )["count"]
 
     no_commits = session.DB.ES.count(
-            index=session.DB.dbname,
-            doc_type="code_commit",
-            body = query
-        )['count']
+        index=session.DB.dbname, doc_type="code_commit", body=query
+    )["count"]
 
     JSON_OUT = {
-        'found': True,
-        'bio': {
-            'organisation': dOrg,
-            'name': person['name'],
-            'email': person['email'],
-            'id': pid,
-            'gravatar': hashlib.md5(person['email'].lower().encode('utf-8')).hexdigest(),
-            'firstEmail': firstEmail,
-            'firstCommit': firstCommit,
-            'firstAuthor': firstAuthor,
-            'tags': person.get('tags', []),
-            'alts': person.get('alts', []),
-            'emails': no_emails,
-            'commits': no_commits
+        "found": True,
+        "bio": {
+            "organisation": dOrg,
+            "name": person["name"],
+            "email": person["email"],
+            "id": pid,
+            "gravatar": hashlib.md5(
+                person["email"].lower().encode("utf-8")
+            ).hexdigest(),
+            "firstEmail": firstEmail,
+            "firstCommit": firstCommit,
+            "firstAuthor": firstAuthor,
+            "tags": person.get("tags", []),
+            "alts": person.get("alts", []),
+            "emails": no_emails,
+            "commits": no_commits,
         },
-        'okay': True,
-        'responseTime': time.time() - now
+        "okay": True,
+        "responseTime": time.time() - now,
     }
     yield json.dumps(JSON_OUT)
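The "gravatar" field computed above is the standard Gravatar key: an MD5 hex digest of the lowercased address. A sketch of how a UI might consume it (URL scheme per gravatar.com; the address is hypothetical and this lookup is not part of the patch):

    import hashlib

    email = "someone@example.org"  # hypothetical address
    g = hashlib.md5(email.lower().encode("utf-8")).hexdigest()
    avatar_url = "https://www.gravatar.com/avatar/%s" % g
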
diff --git a/kibble/api/pages/bio/newtimers.py b/kibble/api/pages/bio/newtimers.py
index e85a08b..57b590a 100644
--- a/kibble/api/pages/bio/newtimers.py
+++ b/kibble/api/pages/bio/newtimers.py
@@ -61,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the newtimers list renderer for Kibble
 """
@@ -72,44 +69,25 @@ import json
 import time
 import hashlib
 
+
 def find_earlier(session, query, when, who, which, where, doctype, dOrg):
     """Find earlier document pertaining to this user. return True if found"""
-    if 'aggs' in query:
-        del query['aggs']
-
-    rangeQuery = {'range':
-                    {
-                        which: {
-                            'from': 0,
-                            'to': time.time()
-                        }
-                    }
-                }
-
-    query['query']['bool']['must'] = [
-        rangeQuery,
-        {
-            'term': {
-                'organisation': dOrg
-            }
-        },
-        {
-            'term': {
-                where: who
-            }
+    if "aggs" in query:
+        del query["aggs"]
 
-        }
-        ]
-    query['size'] = 1
-    query['sort'] = [{ which: 'asc' }]
+    rangeQuery = {"range": {which: {"from": 0, "to": time.time()}}}
 
-    res = session.DB.ES.search(
-        index=session.DB.dbname,
-        doc_type=doctype,
-        body = query
-    )
-    if res['hits']['hits']:
-        doc = res['hits']['hits'][0]['_source']
+    query["query"]["bool"]["must"] = [
+        rangeQuery,
+        {"term": {"organisation": dOrg}},
+        {"term": {where: who}},
+    ]
+    query["size"] = 1
+    query["sort"] = [{which: "asc"}]
+
+    res = session.DB.ES.search(index=session.DB.dbname, doc_type=doctype, body=query)
+    if res["hits"]["hits"]:
+        doc = res["hits"]["hits"][0]["_source"]
         if doc[which] >= when:
             return [doc[which], doc]
         else:
@@ -128,14 +106,12 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
-
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
 
+    dOrg = session.user["defaultOrganisation"] or "apache"
 
     # Keep track of all contributors, and newcomers
     contributors = []
@@ -144,170 +120,120 @@ def run(API, environ, indata, session):
     ####################################################################
     # Start by grabbing all contributors this period via terms agg     #
     ####################################################################
-    dateTo = indata.get('to', int(time.time()))
-    dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span
-
-
-
+    dateTo = indata.get("to", int(time.time()))
+    dateFrom = indata.get(
+        "from", dateTo - (86400 * 30 * 6)
+    )  # Default to a 6 month span
 
     ############################
     # CODE NEWTIMERS           #
     ############################
-    rangeKey = 'ts'
-    rangeQuery = {'range':
-                    {
-                        rangeKey: {
-                            'from': dateFrom,
-                            'to': dateTo
-                        }
-                    }
-                }
+    rangeKey = "ts"
+    rangeQuery = {"range": {rangeKey: {"from": dateFrom, "to": dateTo}}}
 
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            rangeQuery,
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
-            }
-
-    query['aggs'] = {
-        'by_committer': {
-            'terms': {
-                'field': 'committer_email',
-                'size': 500
-            }
-        },
-        'by_author': {
-            'terms': {
-                'field': 'author_email',
-                'size': 500
-            }
-        }
+        "query": {"bool": {"must": [rangeQuery, {"term": {"organisation": dOrg}}]}}
+    }
+
+    query["aggs"] = {
+        "by_committer": {"terms": {"field": "committer_email", "size": 500}},
+        "by_author": {"terms": {"field": "author_email", "size": 500}},
     }
 
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
 
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="code_commit",
-            body = query
-        )
+        index=session.DB.dbname, doc_type="code_commit", body=query
+    )
 
     code_contributors = []
-    for bucket in res['aggregations']['by_committer']['buckets']:
-        email = bucket['key']
+    for bucket in res["aggregations"]["by_committer"]["buckets"]:
+        email = bucket["key"]
         if email not in code_contributors:
             code_contributors.append(email)
 
-    for bucket in res['aggregations']['by_author']['buckets']:
-        email = bucket['key']
+    for bucket in res["aggregations"]["by_author"]["buckets"]:
+        email = bucket["key"]
         if email not in code_contributors:
             code_contributors.append(email)
 
     # Now, for each contributor, find if they have done anything before
     for email in code_contributors:
-        ea = find_earlier(session, query, dateFrom, email, 'ts', 'author_email', 'code_commit', dOrg)
-        ec = find_earlier(session, query, dateFrom, email, 'ts', 'committer_email', 'code_commit', dOrg)
+        ea = find_earlier(
+            session, query, dateFrom, email, "ts", "author_email", "code_commit", dOrg
+        )
+        ec = find_earlier(
+            session,
+            query,
+            dateFrom,
+            email,
+            "ts",
+            "committer_email",
+            "code_commit",
+            dOrg,
+        )
         if ea[0] != -1 and ec[0] != -1:
             earliest = ea
             if earliest[0] == -1 or (earliest[0] > ec[0] and ec[0] != -1):
                 earliest = ec
-            newcomers[email] = {
-                'code': earliest
-            }
-
-
+            newcomers[email] = {"code": earliest}
 
     ############################
     # ISSUE NEWTIMERS          #
     ############################
-    rangeKey = 'created'
-    rangeQuery = {'range':
-                    {
-                        rangeKey: {
-                            'from': dateFrom,
-                            'to': dateTo
-                        }
-                    }
-                }
+    rangeKey = "created"
+    rangeQuery = {"range": {rangeKey: {"from": dateFrom, "to": dateTo}}}
 
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            rangeQuery,
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
-            }
-
-    query['aggs'] = {
-        'by_creator': {
-            'terms': {
-                'field': 'issueCreator',
-                'size': 500
-            }
-        },
-        'by_closer': {
-            'terms': {
-                'field': 'issueCloser',
-                'size': 500
-            }
-        }
+        "query": {"bool": {"must": [rangeQuery, {"term": {"organisation": dOrg}}]}}
+    }
+
+    query["aggs"] = {
+        "by_creator": {"terms": {"field": "issueCreator", "size": 500}},
+        "by_closer": {"terms": {"field": "issueCloser", "size": 500}},
     }
 
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
 
-    res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="issue",
-            body = query
-        )
+    res = session.DB.ES.search(index=session.DB.dbname, doc_type="issue", body=query)
 
     issue_contributors = []
-    for bucket in res['aggregations']['by_creator']['buckets']:
-        email = bucket['key']
+    for bucket in res["aggregations"]["by_creator"]["buckets"]:
+        email = bucket["key"]
         if email not in issue_contributors:
             issue_contributors.append(email)
 
-    for bucket in res['aggregations']['by_closer']['buckets']:
-        email = bucket['key']
+    for bucket in res["aggregations"]["by_closer"]["buckets"]:
+        email = bucket["key"]
         if email not in issue_contributors:
             issue_contributors.append(email)
 
     # Now, for each contributor, find if they have done anything before
     for email in issue_contributors:
-        ecr = find_earlier(session, query, dateFrom, email, 'created', 'issueCreator', 'issue', dOrg)
-        ecl = find_earlier(session, query, dateFrom, email, 'closed', 'issueCloser', 'issue', dOrg)
+        ecr = find_earlier(
+            session, query, dateFrom, email, "created", "issueCreator", "issue", dOrg
+        )
+        ecl = find_earlier(
+            session, query, dateFrom, email, "closed", "issueCloser", "issue", dOrg
+        )
         if ecr[0] != -1 and ecl[0] != -1:
             earliest = ecr
             if earliest[0] == -1 or (earliest[0] > ecl[0] and ecl[0] != -1):
                 earliest = ecl
             newcomers[email] = newcomers.get(email, {})
-            newcomers[email]['issue'] = earliest
+            newcomers[email]["issue"] = earliest
 
     email_contributors = []
 
@@ -316,12 +242,18 @@ def run(API, environ, indata, session):
     ################################
 
     for email in newcomers:
-        pid = hashlib.sha1( ("%s%s" % (dOrg, email)).encode('ascii', errors='replace')).hexdigest()
+        pid = hashlib.sha1(
+            ("%s%s" % (dOrg, email)).encode("ascii", errors="replace")
+        ).hexdigest()
         person = {}
-        if session.DB.ES.exists(index=session.DB.dbname, doc_type="person", id = pid):
-            person = session.DB.ES.get(index=session.DB.dbname, doc_type="person", id = pid)['_source']
-        person['md5'] = hashlib.md5(person['email'].encode('utf-8')).hexdigest() # gravatar needed for UI!
-        newcomers[email]['bio'] = person
+        if session.DB.ES.exists(index=session.DB.dbname, doc_type="person", id=pid):
+            person = session.DB.ES.get(
+                index=session.DB.dbname, doc_type="person", id=pid
+            )["_source"]
+        person["md5"] = hashlib.md5(
+            person["email"].encode("utf-8")
+        ).hexdigest()  # gravatar needed for UI!
+        newcomers[email]["bio"] = person
 
     newcomers_code = []
     newcomers_issues = []
@@ -329,30 +261,21 @@ def run(API, environ, indata, session):
 
     # Count newcomers in each category (TODO: put this elsewhere earlier)
     for email, entry in newcomers.items():
-        if 'code' in entry:
+        if "code" in entry:
             newcomers_code.append(email)
-        if 'issue' in entry:
+        if "issue" in entry:
             newcomers_issues.append(email)
-        if 'email' in entry:
+        if "email" in entry:
             newcomers_email.append(email)
 
     JSON_OUT = {
-        'okay': True,
-        'stats': {
-            'code': {
-                'newcomers': newcomers_code,
-                'seen': len(code_contributors),
-            },
-            'issues': {
-                'newcomers': newcomers_issues,
-                'seen': len(issue_contributors),
-            },
-            'email': {
-                'newcomers': newcomers_email,
-                'seen': len(email_contributors),
-            }
+        "okay": True,
+        "stats": {
+            "code": {"newcomers": newcomers_code, "seen": len(code_contributors)},
+            "issues": {"newcomers": newcomers_issues, "seen": len(issue_contributors)},
+            "email": {"newcomers": newcomers_email, "seen": len(email_contributors)},
         },
-        'bios': newcomers,
-        'responseTime': time.time() - now
+        "bios": newcomers,
+        "responseTime": time.time() - now,
     }
-    yield json.dumps(JSON_OUT, indent = 2)
+    yield json.dumps(JSON_OUT, indent=2)
diff --git a/kibble/api/pages/bio/trends.py b/kibble/api/pages/bio/trends.py
index 18b84b7..d8dffa6 100644
--- a/kibble/api/pages/bio/trends.py
+++ b/kibble/api/pages/bio/trends.py
@@ -61,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the contributor trends renderer for Kibble
 """
@@ -71,6 +68,7 @@ This is the contributor trends renderer for Kibble
 import json
 import time
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -81,20 +79,20 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
-
-    dateTo = indata.get('to', int(time.time()))
-    dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
+
+    dateTo = indata.get("to", int(time.time()))
+    dateFrom = indata.get(
+        "from", dateTo - (86400 * 30 * 6)
+    )  # Default to a 6 month span
     if dateFrom < 0:
         dateFrom = 0
     dateYonder = dateFrom - (dateTo - dateFrom)
 
-
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
 
     ####################################################################
     # We start by doing all the queries for THIS period.               #
@@ -102,111 +100,61 @@ def run(API, environ, indata, session):
     # and rerun the same queries.                                      #
     ####################################################################
 
-    rangeKey = 'created'
-    rangeQuery = {'range':
-                    {
-                        rangeKey: {
-                            'from': dateFrom,
-                            'to': dateTo
-                        }
-                    }
-                }
+    rangeKey = "created"
+    rangeQuery = {"range": {rangeKey: {"from": dateFrom, "to": dateTo}}}
     # ISSUES OPENED
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            rangeQuery,
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
-            }
+        "query": {"bool": {"must": [rangeQuery, {"term": {"organisation": dOrg}}]}}
+    }
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-    if indata.get('email'):
-        codeKey = 'committer_email' if not indata.get('author') else 'author_email'
-        query['query']['bool']['should'] = [
-            {'term': {'issueCreator': indata.get('email')}},
-            {'term': {'issueCloser': indata.get('email')}},
-            {'term': {'sender': indata.get('email')}},
-            {'term': {codeKey: indata.get('email')}},
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
+    if indata.get("email"):
+        codeKey = "committer_email" if not indata.get("author") else "author_email"
+        query["query"]["bool"]["should"] = [
+            {"term": {"issueCreator": indata.get("email")}},
+            {"term": {"issueCloser": indata.get("email")}},
+            {"term": {"sender": indata.get("email")}},
+            {"term": {codeKey: indata.get("email")}},
         ]
-        query['query']['bool']['minimum_should_match'] = 1
-
+        query["query"]["bool"]["minimum_should_match"] = 1
 
     # ISSUES CREATED
-    res = session.DB.ES.count(
-            index=session.DB.dbname,
-            doc_type="issue",
-            body = query
-        )
-    no_issues_created = res['count']
-
+    res = session.DB.ES.count(index=session.DB.dbname, doc_type="issue", body=query)
+    no_issues_created = res["count"]
 
     # ISSUES CLOSED
     rangeKey = "closed"
-    query['query']['bool']['must'][0] = {'range':
-                    {
-                        rangeKey: {
-                            'from': dateFrom,
-                            'to': dateTo
-                        }
-                    }
-                }
-
-    res = session.DB.ES.count(
-            index=session.DB.dbname,
-            doc_type="issue",
-            body = query
-        )
-    no_issues_closed = res['count']
+    query["query"]["bool"]["must"][0] = {
+        "range": {rangeKey: {"from": dateFrom, "to": dateTo}}
+    }
 
+    res = session.DB.ES.count(index=session.DB.dbname, doc_type="issue", body=query)
+    no_issues_closed = res["count"]
 
     # EMAIL SENT
     rangeKey = "ts"
-    query['query']['bool']['must'][0] = {'range':
-                    {
-                        rangeKey: {
-                            'from': dateFrom,
-                            'to': dateTo
-                        }
-                    }
-                }
+    query["query"]["bool"]["must"][0] = {
+        "range": {rangeKey: {"from": dateFrom, "to": dateTo}}
+    }
 
-    res = session.DB.ES.count(
-            index=session.DB.dbname,
-            doc_type="email",
-            body = query
-        )
-    no_email_sent = res['count']
+    res = session.DB.ES.count(index=session.DB.dbname, doc_type="email", body=query)
+    no_email_sent = res["count"]
 
     # COMMITS MADE
     rangeKey = "ts"
-    query['query']['bool']['must'][0] = {'range':
-                    {
-                        rangeKey: {
-                            'from': dateFrom,
-                            'to': dateTo
-                        }
-                    }
-                }
+    query["query"]["bool"]["must"][0] = {
+        "range": {rangeKey: {"from": dateFrom, "to": dateTo}}
+    }
 
     res = session.DB.ES.count(
-            index=session.DB.dbname,
-            doc_type="code_commit",
-            body = query
-        )
-    no_commits = res['count']
-
-
+        index=session.DB.dbname, doc_type="code_commit", body=query
+    )
+    no_commits = res["count"]
 
     ####################################################################
     # Change to PRIOR SPAN                                             #
@@ -214,108 +162,64 @@ def run(API, environ, indata, session):
 
     # ISSUES OPENED
     rangeKey = "created"
-    query['query']['bool']['must'][0] = {'range':
-                    {
-                        rangeKey: {
-                            'from': dateYonder,
-                            'to': dateFrom-1
-                        }
-                    }
-                }
-
-    res = session.DB.ES.count(
-            index=session.DB.dbname,
-            doc_type="issue",
-            body = query
-        )
-    no_issues_created_before = res['count']
-
+    query["query"]["bool"]["must"][0] = {
+        "range": {rangeKey: {"from": dateYonder, "to": dateFrom - 1}}
+    }
 
+    res = session.DB.ES.count(index=session.DB.dbname, doc_type="issue", body=query)
+    no_issues_created_before = res["count"]
 
     # ISSUES CLOSED
     rangeKey = "closed"
-    query['query']['bool']['must'][0] = {'range':
-                    {
-                        rangeKey: {
-                            'from': dateYonder,
-                            'to': dateFrom-1
-                        }
-                    }
-                }
-
-    res = session.DB.ES.count(
-            index=session.DB.dbname,
-            doc_type="issue",
-            body = query
-        )
-    no_issues_closed_before = res['count']
+    query["query"]["bool"]["must"][0] = {
+        "range": {rangeKey: {"from": dateYonder, "to": dateFrom - 1}}
+    }
 
+    res = session.DB.ES.count(index=session.DB.dbname, doc_type="issue", body=query)
+    no_issues_closed_before = res["count"]
 
     # EMAIL SENT
     rangeKey = "ts"
-    query['query']['bool']['must'][0] = {'range':
-                    {
-                        rangeKey: {
-                            'from': dateYonder,
-                            'to': dateFrom-1
-                        }
-                    }
-                }
-
+    query["query"]["bool"]["must"][0] = {
+        "range": {rangeKey: {"from": dateYonder, "to": dateFrom - 1}}
+    }
 
-    res = session.DB.ES.count(
-            index=session.DB.dbname,
-            doc_type="email",
-            body = query
-        )
-    no_email_sent_before = res['count']
+    res = session.DB.ES.count(index=session.DB.dbname, doc_type="email", body=query)
+    no_email_sent_before = res["count"]
 
     # CODE COMMITS
     rangeKey = "ts"
-    query['query']['bool']['must'][0] = {'range':
-                    {
-                        rangeKey: {
-                            'from': dateYonder,
-                            'to': dateFrom-1
-                        }
-                    }
-                }
-
+    query["query"]["bool"]["must"][0] = {
+        "range": {rangeKey: {"from": dateYonder, "to": dateFrom - 1}}
+    }
 
     res = session.DB.ES.count(
-            index=session.DB.dbname,
-            doc_type="code_commit",
-            body = query
-        )
-    no_commits_before = res['count']
-
+        index=session.DB.dbname, doc_type="code_commit", body=query
+    )
+    no_commits_before = res["count"]
 
     trends = {
         "created": {
-            'before': no_issues_created_before,
-            'after': no_issues_created,
-            'title': "Issues opened this period"
+            "before": no_issues_created_before,
+            "after": no_issues_created,
+            "title": "Issues opened this period",
         },
         "closed": {
-            'before': no_issues_closed_before,
-            'after': no_issues_closed,
-            'title': "Issues closed this period"
+            "before": no_issues_closed_before,
+            "after": no_issues_closed,
+            "title": "Issues closed this period",
         },
         "email": {
-            'before': no_email_sent_before,
-            'after': no_email_sent,
-            'title': "Emails sent this period"
+            "before": no_email_sent_before,
+            "after": no_email_sent,
+            "title": "Emails sent this period",
         },
         "code": {
-            'before': no_commits_before,
-            'after': no_commits,
-            'title': "Commits this period"
-        }
+            "before": no_commits_before,
+            "after": no_commits,
+            "title": "Commits this period",
+        },
     }
 
-    JSON_OUT = {
-        'trends': trends,
-        'okay': True,
-        'responseTime': time.time() - now
-    }
+    JSON_OUT = {"trends": trends, "okay": True, "responseTime": time.time() - now}
     yield json.dumps(JSON_OUT)
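
Every hunk in this commit follows from the same handful of Black rules: double quotes, no spaces around '=' in keyword arguments, an 88-column line length, and a trailing comma on any collection that stays split across lines. A minimal before/after sketch of those rules on a hypothetical query dict (not taken from this commit):

    # Hand-formatted input, in the style the old code used:
    #
    #     query = {'query':
    #                 {'bool':
    #                     {'must': [
    #                         {'term': {'organisation': 'apache'}}
    #                     ]}}}
    #     res = es.count(index = dbname, doc_type = 'issue', body = query)
    #
    # Black normalises quotes, drops the spaces around keyword '=', and
    # packs nested literals up to the 88-column limit; a trailing comma
    # on a multi-line literal ("magic trailing comma") keeps it exploded.
    query = {
        "query": {"bool": {"must": [{"term": {"organisation": "apache"}}]}},
    }
    print(query["query"]["bool"]["must"][0]["term"]["organisation"])  # apache
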
diff --git a/kibble/api/pages/ci/queue.py b/kibble/api/pages/ci/queue.py
index b64c6cc..d539bee 100644
--- a/kibble/api/pages/ci/queue.py
+++ b/kibble/api/pages/ci/queue.py
@@ -61,7 +61,6 @@
 ########################################################################
 
 
-
 """
 This is the CI queue timeseries renderer for Kibble
 """
@@ -70,6 +69,7 @@ import json
 import time
 import hashlib
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -80,132 +80,98 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
 
     # We only want build sources, so we can sum up later.
-    viewList = session.subType(['jenkins', 'travis', 'buildbot'], viewList)
+    viewList = session.subType(["jenkins", "travis", "buildbot"], viewList)
 
-    dateTo = indata.get('to', int(time.time()))
-    dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span
-
-    interval = indata.get('interval', 'month')
+    dateTo = indata.get("to", int(time.time()))
+    dateFrom = indata.get(
+        "from", dateTo - (86400 * 30 * 6)
+    )  # Default to a 6 month span
 
+    interval = indata.get("interval", "month")
 
     ####################################################################
     ####################################################################
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'time': {
-                                        'from': dateFrom,
-                                        'to': dateTo
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"time": {"from": dateFrom, "to": dateTo}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        }
+    }
     # Source-specific or view-specific??
-    if indata.get('source'):
-        viewList = [indata.get('source')]
+    if indata.get("source"):
+        viewList = [indata.get("source")]
 
-    query['query']['bool']['must'].append({'term': {'sourceID': 'x'}})
+    query["query"]["bool"]["must"].append({"term": {"sourceID": "x"}})
 
     timeseries = []
     for source in viewList:
-        query['query']['bool']['must'][2] = {'term': {'sourceID': source}}
+        query["query"]["bool"]["must"][2] = {"term": {"sourceID": source}}
 
         # Get queue stats
-        query['aggs'] = {
-                'timeseries': {
-                    'date_histogram': {
-                        'field': 'date',
-                        'interval': interval
-                    },
-                    'aggs': {
-                        'size': {
-                            'avg': {
-                                'field': 'size'
-                            }
-                        },
-                        'blocked': {
-                            'avg': {
-                                'field': 'blocked'
-                            }
-                        },
-                        'building': {
-                            'avg': {
-                                'field': 'building'
-                            }
-                        },
-                        'stuck': {
-                            'avg': {
-                                'field': 'stuck'
-                            }
-                        },
-                        'wait': {
-                            'avg': {
-                                'field': 'avgwait'
-                            }
-                        }
-                    }
-                }
+        query["aggs"] = {
+            "timeseries": {
+                "date_histogram": {"field": "date", "interval": interval},
+                "aggs": {
+                    "size": {"avg": {"field": "size"}},
+                    "blocked": {"avg": {"field": "blocked"}},
+                    "building": {"avg": {"field": "building"}},
+                    "stuck": {"avg": {"field": "stuck"}},
+                    "wait": {"avg": {"field": "avgwait"}},
+                },
             }
+        }
         res = session.DB.ES.search(
-                index=session.DB.dbname,
-                doc_type="ci_queue",
-                size = 0,
-                body = query
-            )
-
-        for bucket in res['aggregations']['timeseries']['buckets']:
-            ts = int(bucket['key'] / 1000)
-            bucket['wait']['value'] = bucket['wait'].get('value', 0) or 0
-            if bucket['doc_count'] == 0:
+            index=session.DB.dbname, doc_type="ci_queue", size=0, body=query
+        )
+
+        for bucket in res["aggregations"]["timeseries"]["buckets"]:
+            ts = int(bucket["key"] / 1000)
+            bucket["wait"]["value"] = bucket["wait"].get("value", 0) or 0
+            if bucket["doc_count"] == 0:
                 continue
 
             found = False
             for t in timeseries:
-                if t['date'] == ts:
+                if t["date"] == ts:
                     found = True
-                    t['queue size'] += bucket['size']['value']
-                    t['builds running'] += bucket['building']['value']
-                    t['average wait (hours)'] += bucket['wait']['value']
-                    t['builders'] += 1
+                    t["queue size"] += bucket["size"]["value"]
+                    t["builds running"] += bucket["building"]["value"]
+                    t["average wait (hours)"] += bucket["wait"]["value"]
+                    t["builders"] += 1
             if not found:
-                timeseries.append({
-                    'date': ts,
-                    'queue size': bucket['size']['value'],
-                    'builds running': bucket['building']['value'],
-                    'average wait (hours)': bucket['wait']['value'],
-                    'builders': 1,
-                })
+                timeseries.append(
+                    {
+                        "date": ts,
+                        "queue size": bucket["size"]["value"],
+                        "builds running": bucket["building"]["value"],
+                        "average wait (hours)": bucket["wait"]["value"],
+                        "builders": 1,
+                    }
+                )
 
     for t in timeseries:
-        t['average wait (hours)'] = int(t['average wait (hours)']/360)/10.0
-        del t['builders']
+        t["average wait (hours)"] = int(t["average wait (hours)"] / 360) / 10.0
+        del t["builders"]
 
     JSON_OUT = {
-        'widgetType': {
-            'chartType': 'line',  # Recommendation for the UI
-            'nofill': True
+        "widgetType": {
+            "chartType": "line",  # Recommendation for the UI
+            "nofill": True,
         },
-        'timeseries': timeseries,
-        'interval': interval,
-        'okay': True,
-        'responseTime': time.time() - now
+        "timeseries": timeseries,
+        "interval": interval,
+        "okay": True,
+        "responseTime": time.time() - now,
     }
     yield json.dumps(JSON_OUT)
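
queue.py above iterates the sources one by one and merges histogram buckets that land on the same date, which is why it carries a builders counter and converts the accumulated avgwait seconds to hours at the end with int(.../360)/10.0. (The summed wait is labelled an average but never divided by the builders count; the sketch mirrors the code as written.) The same merge in isolation, on hypothetical bucket values:

    # Hypothetical (date, size, building, wait-seconds) rows, standing in
    # for the per-source date_histogram buckets queue.py iterates over.
    per_source_buckets = [
        (1580515200, 4.0, 2.0, 7200.0),
        (1580515200, 6.0, 1.0, 3600.0),  # second builder, same date: merged
        (1583020800, 3.0, 2.0, 1800.0),
    ]

    merged = {}
    for date, size, building, wait in per_source_buckets:
        t = merged.setdefault(date, {"queue size": 0, "builds running": 0, "wait": 0})
        t["queue size"] += size
        t["builds running"] += building
        t["wait"] += wait

    for t in merged.values():
        # seconds -> hours, truncated to one decimal, as in queue.py
        t["average wait (hours)"] = int(t.pop("wait") / 360) / 10.0

    print(merged[1580515200])
    # {'queue size': 10.0, 'builds running': 3.0, 'average wait (hours)': 3.0}
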
diff --git a/kibble/api/pages/ci/status.py b/kibble/api/pages/ci/status.py
index 1629bc6..954c461 100644
--- a/kibble/api/pages/ci/status.py
+++ b/kibble/api/pages/ci/status.py
@@ -61,7 +61,6 @@
 ########################################################################
 
 
-
 """
 This is the CI queue status (blocked/stuck) timeseries renderer for Kibble
 """
@@ -70,6 +69,7 @@ import json
 import time
 import hashlib
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -80,104 +80,73 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
 
-    dateTo = indata.get('to', int(time.time()))
-    dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span
-
-    interval = indata.get('interval', 'month')
+    dateTo = indata.get("to", int(time.time()))
+    dateFrom = indata.get(
+        "from", dateTo - (86400 * 30 * 6)
+    )  # Default to a 6 month span
 
+    interval = indata.get("interval", "month")
 
     ####################################################################
     ####################################################################
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'time': {
-                                        'from': dateFrom,
-                                        'to': dateTo
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"time": {"from": dateFrom, "to": dateTo}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        }
+    }
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
 
     # Get queue stats
-    query['aggs'] = {
-            'timeseries': {
-                'date_histogram': {
-                    'field': 'date',
-                    'interval': interval
-                },
-                'aggs': {
-                    'size': {
-                        'avg': {
-                            'field': 'size'
-                        }
-                    },
-                    'blocked': {
-                        'avg': {
-                            'field': 'blocked'
-                        }
-                    },
-                    'stuck': {
-                        'avg': {
-                            'field': 'stuck'
-                        }
-                    },
-                    'wait': {
-                        'avg': {
-                            'field': 'avgwait'
-                        }
-                    }
-                }
-            }
+    query["aggs"] = {
+        "timeseries": {
+            "date_histogram": {"field": "date", "interval": interval},
+            "aggs": {
+                "size": {"avg": {"field": "size"}},
+                "blocked": {"avg": {"field": "blocked"}},
+                "stuck": {"avg": {"field": "stuck"}},
+                "wait": {"avg": {"field": "avgwait"}},
+            },
         }
+    }
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="ci_queue",
-            size = 0,
-            body = query
-        )
+        index=session.DB.dbname, doc_type="ci_queue", size=0, body=query
+    )
 
     timeseries = []
-    for bucket in res['aggregations']['timeseries']['buckets']:
-        if bucket['doc_count'] == 0:
+    for bucket in res["aggregations"]["timeseries"]["buckets"]:
+        if bucket["doc_count"] == 0:
             continue
-        ts = int(bucket['key'] / 1000)
-        timeseries.append({
-            'date': ts,
-            'builds blocked': bucket['blocked']['value'],
-            'builds stuck': bucket['stuck']['value']
-        })
+        ts = int(bucket["key"] / 1000)
+        timeseries.append(
+            {
+                "date": ts,
+                "builds blocked": bucket["blocked"]["value"],
+                "builds stuck": bucket["stuck"]["value"],
+            }
+        )
 
     JSON_OUT = {
-        'widgetType': {
-            'chartType': 'bar'  # Recommendation for the UI
-        },
-        'timeseries': timeseries,
-        'interval': interval,
-        'okay': True,
-        'responseTime': time.time() - now
+        "widgetType": {"chartType": "bar"},  # Recommendation for the UI
+        "timeseries": timeseries,
+        "interval": interval,
+        "okay": True,
+        "responseTime": time.time() - now,
     }
     yield json.dumps(JSON_OUT)
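
Both CI renderers reduce a date_histogram response to a flat timeseries, dividing each bucket key by 1000 because Elasticsearch reports histogram keys in epoch milliseconds. A standalone sketch of that reduction, with a hypothetical response dict standing in for session.DB.ES.search():

    # Hypothetical response in the shape ES returns for the query above.
    res = {
        "aggregations": {
            "timeseries": {
                "buckets": [
                    {"key": 1580515200000, "doc_count": 3,
                     "blocked": {"value": 2.0}, "stuck": {"value": 1.0}},
                    {"key": 1583020800000, "doc_count": 0},  # empty interval
                ]
            }
        }
    }

    timeseries = []
    for bucket in res["aggregations"]["timeseries"]["buckets"]:
        if bucket["doc_count"] == 0:  # skip empty intervals, as status.py does
            continue
        timeseries.append({
            "date": int(bucket["key"] / 1000),  # epoch ms -> epoch seconds
            "builds blocked": bucket["blocked"]["value"],
            "builds stuck": bucket["stuck"]["value"],
        })

    print(timeseries)
    # [{'date': 1580515200, 'builds blocked': 2.0, 'builds stuck': 1.0}]
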
diff --git a/kibble/api/pages/ci/top-buildcount.py b/kibble/api/pages/ci/top-buildcount.py
index 96e12cb..52a59e9 100644
--- a/kibble/api/pages/ci/top-buildcount.py
+++ b/kibble/api/pages/ci/top-buildcount.py
@@ -61,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the TopN CI jobs by total build count renderer for Kibble
 """
@@ -72,6 +69,7 @@ import json
 import time
 import re
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -82,96 +80,76 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
 
-    dateTo = indata.get('to', int(time.time()))
-    dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span
+    dateTo = indata.get("to", int(time.time()))
+    dateFrom = indata.get(
+        "from", dateTo - (86400 * 30 * 6)
+    )  # Default to a 6 month span
 
     ####################################################################
     ####################################################################
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'date': {
-                                        'from': time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(dateFrom)),
-                                        'to': time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(dateTo))
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
+        "query": {
+            "bool": {
+                "must": [
+                    {
+                        "range": {
+                            "date": {
+                                "from": time.strftime(
+                                    "%Y/%m/%d %H:%M:%S", time.gmtime(dateFrom)
+                                ),
+                                "to": time.strftime(
+                                    "%Y/%m/%d %H:%M:%S", time.gmtime(dateTo)
+                                ),
                             }
-                        ]
-                    }
-                }
-            }
-    # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
-    elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-
-    query['aggs'] = {
-        'by_job': {
-                'terms': {
-                    'field': 'jobURL.keyword',
-                    'size': 5000,
-                },
-                'aggs': {
-                    'duration': {
-                        'sum': {
-                            'field': 'duration'
-                        }
-                    },
-                    'ci': {
-                        'terms': {
-                            'field': 'ci.keyword',
-                            'size': 1
                         }
                     },
-                    'name': {
-                        'terms': {
-                            'field': 'job.keyword',
-                            'size': 1
-                        }
-                    }
-                }
+                    {"term": {"organisation": dOrg}},
+                ]
             }
         }
+    }
+    # Source-specific or view-specific??
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
+    elif viewList:
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
+
+    query["aggs"] = {
+        "by_job": {
+            "terms": {"field": "jobURL.keyword", "size": 5000},
+            "aggs": {
+                "duration": {"sum": {"field": "duration"}},
+                "ci": {"terms": {"field": "ci.keyword", "size": 1}},
+                "name": {"terms": {"field": "job.keyword", "size": 1}},
+            },
+        }
+    }
 
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="ci_build",
-            size = 0,
-            body = query
-        )
+        index=session.DB.dbname, doc_type="ci_build", size=0, body=query
+    )
 
     jobs = []
-    for doc in res['aggregations']['by_job']['buckets']:
-        job = doc['key']
-        builds = doc['doc_count']
-        duration = doc['duration']['value']
-        ci = doc['ci']['buckets'][0]['key']
-        jobname = doc['name']['buckets'][0]['key']
+    for doc in res["aggregations"]["by_job"]["buckets"]:
+        job = doc["key"]
+        builds = doc["doc_count"]
+        duration = doc["duration"]["value"]
+        ci = doc["ci"]["buckets"][0]["key"]
+        jobname = doc["name"]["buckets"][0]["key"]
         jobs.append([builds, duration, jobname, ci])
 
-    topjobs = sorted(jobs, key = lambda x: int(x[0]), reverse = True)
+    topjobs = sorted(jobs, key=lambda x: int(x[0]), reverse=True)
     tophash = {}
     for v in topjobs:
         tophash["%s (%s)" % (v[2], v[3])] = v[0]
 
-    JSON_OUT = {
-        'counts': tophash,
-        'okay': True,
-        'responseTime': time.time() - now,
-    }
+    JSON_OUT = {"counts": tophash, "okay": True, "responseTime": time.time() - now}
     yield json.dumps(JSON_OUT)
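
top-buildcount.py asks ES for up to 5000 jobURL buckets and then sorts them client-side by build count rather than letting the terms aggregation order them. The tail end of that pipeline, with hypothetical rows:

    # (builds, duration, jobname, ci) tuples as assembled from the buckets.
    jobs = [
        (120, 9_000_000, "kibble-site", "jenkins"),
        (340, 4_500_000, "kibble-tests", "travis"),
    ]
    topjobs = sorted(jobs, key=lambda x: int(x[0]), reverse=True)
    tophash = {"%s (%s)" % (v[2], v[3]): v[0] for v in topjobs}
    print(tophash)
    # {'kibble-tests (travis)': 340, 'kibble-site (jenkins)': 120}
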
diff --git a/kibble/api/pages/ci/top-buildtime.py b/kibble/api/pages/ci/top-buildtime.py
index a9481ee..f3a8c80 100644
--- a/kibble/api/pages/ci/top-buildtime.py
+++ b/kibble/api/pages/ci/top-buildtime.py
@@ -61,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the TopN CI jobs by total build time renderer for Kibble
 """
@@ -72,6 +69,7 @@ import json
 import time
 import re
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -82,103 +80,83 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
 
-    dateTo = indata.get('to', int(time.time()))
-    dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span
+    dateTo = indata.get("to", int(time.time()))
+    dateFrom = indata.get(
+        "from", dateTo - (86400 * 30 * 6)
+    )  # Default to a 6 month span
 
     ####################################################################
     ####################################################################
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'date': {
-                                        'from': time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(dateFrom)),
-                                        'to': time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(dateTo))
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
+        "query": {
+            "bool": {
+                "must": [
+                    {
+                        "range": {
+                            "date": {
+                                "from": time.strftime(
+                                    "%Y/%m/%d %H:%M:%S", time.gmtime(dateFrom)
+                                ),
+                                "to": time.strftime(
+                                    "%Y/%m/%d %H:%M:%S", time.gmtime(dateTo)
+                                ),
                             }
-                        ]
-                    }
-                }
-            }
-    # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
-    elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-
-    query['aggs'] = {
-        'by_job': {
-                'terms': {
-                    'field': 'jobURL.keyword',
-                    'size': 5000,
-                },
-                'aggs': {
-                    'duration': {
-                        'sum': {
-                            'field': 'duration'
-                        }
-                    },
-                    'ci': {
-                        'terms': {
-                            'field': 'ci.keyword',
-                            'size': 1
                         }
                     },
-                    'name': {
-                        'terms': {
-                            'field': 'job.keyword',
-                            'size': 1
-                        }
-                    }
-                }
+                    {"term": {"organisation": dOrg}},
+                ]
             }
         }
+    }
+    # Source-specific or view-specific??
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
+    elif viewList:
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
+
+    query["aggs"] = {
+        "by_job": {
+            "terms": {"field": "jobURL.keyword", "size": 5000},
+            "aggs": {
+                "duration": {"sum": {"field": "duration"}},
+                "ci": {"terms": {"field": "ci.keyword", "size": 1}},
+                "name": {"terms": {"field": "job.keyword", "size": 1}},
+            },
+        }
+    }
 
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="ci_build",
-            size = 0,
-            body = query
-        )
+        index=session.DB.dbname, doc_type="ci_build", size=0, body=query
+    )
 
     jobs = []
-    for doc in res['aggregations']['by_job']['buckets']:
-        job = doc['key']
-        builds = doc['doc_count']
-        duration = doc['duration']['value']
-        ci = doc['ci']['buckets'][0]['key']
-        jobname = doc['name']['buckets'][0]['key']
+    for doc in res["aggregations"]["by_job"]["buckets"]:
+        job = doc["key"]
+        builds = doc["doc_count"]
+        duration = doc["duration"]["value"]
+        ci = doc["ci"]["buckets"][0]["key"]
+        jobname = doc["name"]["buckets"][0]["key"]
         jobs.append([builds, duration, jobname, ci])
 
-    topjobs = sorted(jobs, key = lambda x: int(x[1]), reverse = True)
+    topjobs = sorted(jobs, key=lambda x: int(x[1]), reverse=True)
     top = topjobs[0:24]
     if len(topjobs) > 25:
         count = 0
         for repo in topjobs[24:]:
             count += repo[1]
-        top.append([1, count, "Other jobs", '??'])
+        top.append([1, count, "Other jobs", "??"])
 
     tophash = {}
     for v in top:
-        tophash["%s (%s)" % (v[2], v[3])] = int((v[1]/360000))/10
+        tophash["%s (%s)" % (v[2], v[3])] = int((v[1] / 360000)) / 10
 
-    JSON_OUT = {
-        'counts': tophash,
-        'okay': True,
-        'responseTime': time.time() - now,
-    }
+    JSON_OUT = {"counts": tophash, "okay": True, "responseTime": time.time() - now}
     yield json.dumps(JSON_OUT)
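
The closing conversion in top-buildtime.py, int((v[1] / 360000)) / 10, is milliseconds-to-hours truncated to one decimal: an hour is 3,600,000 ms, so dividing by 360,000 yields tenths of an hour. This assumes the stored duration is in milliseconds; the code does not say so explicitly, but the 360000 divisor only makes sense that way. A quick check:

    def ms_to_hours_one_decimal(duration_ms):
        # Mirrors int((v[1] / 360000)) / 10 from top-buildtime.py:
        # 360000 ms is a tenth of an hour, so this truncates to one decimal.
        return int(duration_ms / 360000) / 10

    print(ms_to_hours_one_decimal(3_600_000))  # 1.0  (exactly one hour)
    print(ms_to_hours_one_decimal(5_400_000))  # 1.5
    print(ms_to_hours_one_decimal(5_759_999))  # 1.5  (truncated, not rounded)
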
diff --git a/kibble/api/pages/code/changes.py b/kibble/api/pages/code/changes.py
index 6365d53..83fd305 100644
--- a/kibble/api/pages/code/changes.py
+++ b/kibble/api/pages/code/changes.py
@@ -61,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the code changes timeseries renderer for Kibble
 """
@@ -71,6 +68,7 @@ This is the code changes timeseries renderer for Kibble
 import json
 import time
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -81,114 +79,90 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
 
+    dateTo = indata.get("to", int(time.time()))
+    dateFrom = indata.get(
+        "from", dateTo - (86400 * 30 * 6)
+    )  # Default to a 6 month span
 
-    dateTo = indata.get('to', int(time.time()))
-    dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span
-
-    which = 'committer_email'
-    role = 'committer'
-    if indata.get('author', False):
-        which = 'author_email'
-        role = 'author'
-
-    interval = indata.get('interval', 'day')
+    which = "committer_email"
+    role = "committer"
+    if indata.get("author", False):
+        which = "author_email"
+        role = "author"
 
+    interval = indata.get("interval", "day")
 
     ####################################################################
     ####################################################################
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'tsday': {
-                                        'from': dateFrom,
-                                        'to': dateTo
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"tsday": {"from": dateFrom, "to": dateTo}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        }
+    }
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-    if indata.get('email'):
-        query['query']['bool']['should'] = [{'term': {'committer_email': indata.get('email')}}, {'term': {'author_email': indata.get('email')}}]
-        query['query']['bool']['minimum_should_match'] = 1
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
+    if indata.get("email"):
+        query["query"]["bool"]["should"] = [
+            {"term": {"committer_email": indata.get("email")}},
+            {"term": {"author_email": indata.get("email")}},
+        ]
+        query["query"]["bool"]["minimum_should_match"] = 1
 
     # Path filter?
-    if indata.get('pathfilter'):
-        pf = indata.get('pathfilter')
-        if '!' in pf:
-            pf = pf.replace('!', '')
-            query['query']['bool']['must_not'] = query['query']['bool'].get('must_not', [])
-            query['query']['bool']['must_not'].append({'regexp': {'files_changed': pf}})
+    if indata.get("pathfilter"):
+        pf = indata.get("pathfilter")
+        if "!" in pf:
+            pf = pf.replace("!", "")
+            query["query"]["bool"]["must_not"] = query["query"]["bool"].get(
+                "must_not", []
+            )
+            query["query"]["bool"]["must_not"].append({"regexp": {"files_changed": pf}})
         else:
-            query['query']['bool']['must'].append({'regexp': {'files_changed': pf}})
+            query["query"]["bool"]["must"].append({"regexp": {"files_changed": pf}})
 
     # Get timeseries for this period
-    query['aggs'] = {
-            'per_interval': {
-                'date_histogram': {
-                    'field': 'date',
-                    'interval': interval
-                },
-                'aggs': {
-                    'insertions': {
-                        'sum': {
-                            'field': 'insertions'
-                        }
-                    },
-                    'deletions': {
-                        'sum': {
-                            'field': 'deletions'
-                        }
-                    }
-                }
-            }
+    query["aggs"] = {
+        "per_interval": {
+            "date_histogram": {"field": "date", "interval": interval},
+            "aggs": {
+                "insertions": {"sum": {"field": "insertions"}},
+                "deletions": {"sum": {"field": "deletions"}},
+            },
         }
+    }
 
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="code_commit",
-            size = 0,
-            body = query
-        )
+        index=session.DB.dbname, doc_type="code_commit", size=0, body=query
+    )
 
     timeseries = []
-    for bucket in res['aggregations']['per_interval']['buckets']:
-        ts = int(bucket['key'] / 1000)
-        icount = bucket['insertions']['value']
-        dcount = bucket['deletions']['value']
-        timeseries.append({
-            'date': ts,
-            'insertions': icount,
-            'deletions': dcount
-        })
+    for bucket in res["aggregations"]["per_interval"]["buckets"]:
+        ts = int(bucket["key"] / 1000)
+        icount = bucket["insertions"]["value"]
+        dcount = bucket["deletions"]["value"]
+        timeseries.append({"date": ts, "insertions": icount, "deletions": dcount})
 
     JSON_OUT = {
-        'timeseries': timeseries,
-        'interval': interval,
-        'okay': True,
-        'responseTime': time.time() - now,
-        'widgetType': {
-            'chartType': 'area'
-        }
+        "timeseries": timeseries,
+        "interval": interval,
+        "okay": True,
+        "responseTime": time.time() - now,
+        "widgetType": {"chartType": "area"},
     }
     yield json.dumps(JSON_OUT)
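
changes.py, commits.py and committers.py all repeat the same pathfilter logic: a '!' anywhere in the filter strips the '!' and turns the regexp into an exclusion under must_not. A hypothetical distillation of that repeated block (not part of the commit):

    def apply_path_filter(query, pathfilter):
        # '!' marks the regexp as an exclusion; otherwise it is a requirement.
        bool_q = query["query"]["bool"]
        if "!" in pathfilter:
            pf = pathfilter.replace("!", "")
            bool_q.setdefault("must_not", []).append({"regexp": {"files_changed": pf}})
        else:
            bool_q.setdefault("must", []).append({"regexp": {"files_changed": pathfilter}})
        return query

    q = {"query": {"bool": {"must": []}}}
    print(apply_path_filter(q, "!^docs/.*"))
    # {'query': {'bool': {'must': [], 'must_not': [{'regexp': {'files_changed': '^docs/.*'}}]}}}
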
diff --git a/kibble/api/pages/code/commits.py b/kibble/api/pages/code/commits.py
index 2b45038..c5377df 100644
--- a/kibble/api/pages/code/commits.py
+++ b/kibble/api/pages/code/commits.py
@@ -61,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the TopN committers list renderer for Kibble
 """
@@ -72,6 +69,7 @@ import json
 import time
 import hashlib
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -82,99 +80,82 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
 
+    dateTo = indata.get("to", int(time.time()))
+    dateFrom = indata.get(
+        "from", dateTo - (86400 * 30 * 6)
+    )  # Default to a 6 month span
 
-    dateTo = indata.get('to', int(time.time()))
-    dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span
-
-    which = 'committer_email'
-    role = 'committer'
-    if indata.get('author', False):
-        which = 'author_email'
-        role = 'author'
-
-    interval = indata.get('interval', 'day')
+    which = "committer_email"
+    role = "committer"
+    if indata.get("author", False):
+        which = "author_email"
+        role = "author"
 
+    interval = indata.get("interval", "day")
 
     ####################################################################
     ####################################################################
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'tsday': {
-                                        'from': dateFrom,
-                                        'to': dateTo
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"tsday": {"from": dateFrom, "to": dateTo}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        }
+    }
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-    if indata.get('email'):
-        query['query']['bool']['should'] = [{'term': {'committer_email': indata.get('email')}}, {'term': {'author_email': indata.get('email')}}]
-        query['query']['bool']['minimum_should_match'] = 1
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
+    if indata.get("email"):
+        query["query"]["bool"]["should"] = [
+            {"term": {"committer_email": indata.get("email")}},
+            {"term": {"author_email": indata.get("email")}},
+        ]
+        query["query"]["bool"]["minimum_should_match"] = 1
 
     # Path filter?
-    if indata.get('pathfilter'):
-        pf = indata.get('pathfilter')
-        if '!' in pf:
-            pf = pf.replace('!', '')
-            query['query']['bool']['must_not'] = query['query']['bool'].get('must_not', [])
-            query['query']['bool']['must_not'].append({'regexp': {'files_changed': pf}})
+    if indata.get("pathfilter"):
+        pf = indata.get("pathfilter")
+        if "!" in pf:
+            pf = pf.replace("!", "")
+            query["query"]["bool"]["must_not"] = query["query"]["bool"].get(
+                "must_not", []
+            )
+            query["query"]["bool"]["must_not"].append({"regexp": {"files_changed": pf}})
         else:
-            query['query']['bool']['must'].append({'regexp': {'files_changed': pf}})
+            query["query"]["bool"]["must"].append({"regexp": {"files_changed": pf}})
 
     # Get number of committers, this period
-    query['aggs'] = {
-            'commits': {
-                'date_histogram': {
-                    'field': 'date',
-                    'interval': interval
-                }
-            }
-        }
+    query["aggs"] = {
+        "commits": {"date_histogram": {"field": "date", "interval": interval}}
+    }
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="code_commit",
-            size = 0,
-            body = query
-        )
+        index=session.DB.dbname, doc_type="code_commit", size=0, body=query
+    )
 
     timeseries = []
-    for bucket in res['aggregations']['commits']['buckets']:
-        ts = int(bucket['key'] / 1000)
-        count = bucket['doc_count']
-        timeseries.append({
-            'date': ts,
-            'commits': count
-        })
+    for bucket in res["aggregations"]["commits"]["buckets"]:
+        ts = int(bucket["key"] / 1000)
+        count = bucket["doc_count"]
+        timeseries.append({"date": ts, "commits": count})
 
     JSON_OUT = {
-        'widgetType': {
-            'chartType': 'bar'  # Recommendation for the UI
-        },
-        'timeseries': timeseries,
-        'interval': interval,
-        'okay': True,
-        'responseTime': time.time() - now
+        "widgetType": {"chartType": "bar"},  # Recommendation for the UI
+        "timeseries": timeseries,
+        "interval": interval,
+        "okay": True,
+        "responseTime": time.time() - now,
     }
     yield json.dumps(JSON_OUT)
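
Almost every page module in this commit opens with the same query skeleton: a range clause on a date field, a term clause on the organisation, then an optional sourceID filter. The reformatting makes the duplication easy to spot; a hypothetical helper (not part of the commit) capturing the shared shape:

    def base_query(date_field, date_from, date_to, org, source=None, view=None):
        # The bool/must skeleton repeated across the page modules above.
        must = [
            {"range": {date_field: {"from": date_from, "to": date_to}}},
            {"term": {"organisation": org}},
        ]
        if source:  # source-specific beats view-specific, as in the modules
            must.append({"term": {"sourceID": source}})
        elif view:
            must.append({"terms": {"sourceID": view}})
        return {"query": {"bool": {"must": must}}}

    print(base_query("tsday", 0, 1_600_000_000, "apache", view=["abc123"]))
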
diff --git a/kibble/api/pages/code/committers.py b/kibble/api/pages/code/committers.py
index b48ae25..4bc0d36 100644
--- a/kibble/api/pages/code/committers.py
+++ b/kibble/api/pages/code/committers.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -62,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the TopN committers list renderer for Kibble
 """
@@ -73,6 +69,7 @@ import json
 import time
 import hashlib
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -83,187 +80,137 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
 
-    dateTo = indata.get('to', int(time.time()))
-    dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span
+    dateTo = indata.get("to", int(time.time()))
+    dateFrom = indata.get(
+        "from", dateTo - (86400 * 30 * 6)
+    )  # Default to a 6 month span
 
-    which = 'committer_email'
-    role = 'committer'
-    if indata.get('author', False):
-        which = 'author_email'
-        role = 'author'
-
-    interval = indata.get('interval', 'month')
+    which = "committer_email"
+    role = "committer"
+    if indata.get("author", False):
+        which = "author_email"
+        role = "author"
 
+    interval = indata.get("interval", "month")
 
     ####################################################################
     ####################################################################
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'tsday': {
-                                        'from': dateFrom,
-                                        'to': dateTo
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"tsday": {"from": dateFrom, "to": dateTo}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        }
+    }
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-    if indata.get('email'):
-        query['query']['bool']['should'] = [{'term': {'committer_email': indata.get('email')}}, {'term': {'author_email': indata.get('email')}}]
-        query['query']['bool']['minimum_should_match'] = 1
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
+    if indata.get("email"):
+        query["query"]["bool"]["should"] = [
+            {"term": {"committer_email": indata.get("email")}},
+            {"term": {"author_email": indata.get("email")}},
+        ]
+        query["query"]["bool"]["minimum_should_match"] = 1
 
     # Path filter?
-    if indata.get('pathfilter'):
-        pf = indata.get('pathfilter')
-        if '!' in pf:
-            pf = pf.replace('!', '')
-            query['query']['bool']['must_not'] = query['query']['bool'].get('must_not', [])
-            query['query']['bool']['must_not'].append({'regexp': {'files_changed': pf}})
+    if indata.get("pathfilter"):
+        pf = indata.get("pathfilter")
+        if "!" in pf:
+            pf = pf.replace("!", "")
+            query["query"]["bool"]["must_not"] = query["query"]["bool"].get(
+                "must_not", []
+            )
+            query["query"]["bool"]["must_not"].append({"regexp": {"files_changed": pf}})
         else:
-            query['query']['bool']['must'].append({'regexp': {'files_changed': pf}})
+            query["query"]["bool"]["must"].append({"regexp": {"files_changed": pf}})
 
     # Get top 25 committers this period
-    query['aggs'] = {
-            'committers': {
-                'terms': {
-                    'field': which,
-                    'size': 25
-                },
-                'aggs': {
-                'byinsertions': {
-                    'terms': {
-                        'field': which
-                    },
-                    'aggs': {
-                        'stats': {
-                            'sum': {
-                                'field': "insertions"
-                            }
-                        }
-                    }
+    query["aggs"] = {
+        "committers": {
+            "terms": {"field": which, "size": 25},
+            "aggs": {
+                "byinsertions": {
+                    "terms": {"field": which},
+                    "aggs": {"stats": {"sum": {"field": "insertions"}}},
                 },
-                'bydeletions': {
-                    'terms': {
-                        'field': which
-                    },
-                    'aggs': {
-                        'stats': {
-                            'sum': {
-                                'field': "deletions"
-                            }
-                        }
-                    }
+                "bydeletions": {
+                    "terms": {"field": which},
+                    "aggs": {"stats": {"sum": {"field": "deletions"}}},
                 },
-            }
             },
-
         }
+    }
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="code_commit",
-            size = 0,
-            body = query
-        )
+        index=session.DB.dbname, doc_type="code_commit", size=0, body=query
+    )
 
     people = {}
-    for bucket in res['aggregations']['committers']['buckets']:
-        email = bucket['key']
-        count = bucket['doc_count']
-        sha = hashlib.sha1( ("%s%s" % (dOrg, email)).encode('utf-8') ).hexdigest()
-        if session.DB.ES.exists(index=session.DB.dbname,doc_type="person",id = sha):
-            pres = session.DB.ES.get(
-                index=session.DB.dbname,
-                doc_type="person",
-                id = sha
-                )
-            person = pres['_source']
-            person['name'] = person.get('name', 'unknown')
+    for bucket in res["aggregations"]["committers"]["buckets"]:
+        email = bucket["key"]
+        count = bucket["doc_count"]
+        sha = hashlib.sha1(("%s%s" % (dOrg, email)).encode("utf-8")).hexdigest()
+        if session.DB.ES.exists(index=session.DB.dbname, doc_type="person", id=sha):
+            pres = session.DB.ES.get(index=session.DB.dbname, doc_type="person", id=sha)
+            person = pres["_source"]
+            person["name"] = person.get("name", "unknown")
             people[email] = person
-            people[email]['gravatar'] = hashlib.md5(person.get('email', 'unknown').encode('utf-8')).hexdigest()
-            people[email]['count'] = count
-            people[email]['subcount'] = {
-                'insertions': int(bucket['byinsertions']['buckets'][0]['stats']['value']),
-                'deletions': int(bucket['bydeletions']['buckets'][0]['stats']['value'])
+            people[email]["gravatar"] = hashlib.md5(
+                person.get("email", "unknown").encode("utf-8")
+            ).hexdigest()
+            people[email]["count"] = count
+            people[email]["subcount"] = {
+                "insertions": int(
+                    bucket["byinsertions"]["buckets"][0]["stats"]["value"]
+                ),
+                "deletions": int(bucket["bydeletions"]["buckets"][0]["stats"]["value"]),
             }
 
     topN = []
     for email, person in people.items():
         topN.append(person)
-    topN = sorted(topN, key = lambda x: x['count'], reverse = True)
+    topN = sorted(topN, key=lambda x: x["count"], reverse=True)
 
     # Get timeseries for this period
-    query['aggs'] = {
-            'per_interval': {
-                'date_histogram': {
-                    'field': 'date',
-                    'interval': interval
-                },
-                'aggs': {
-                    'by_committer': {
-                        'cardinality': {
-                            'field': 'committer_email'
-                        }
-                    },
-                    'by_author': {
-                        'cardinality': {
-                            'field': 'author_email'
-                        }
-                    }
-                }
-            }
+    query["aggs"] = {
+        "per_interval": {
+            "date_histogram": {"field": "date", "interval": interval},
+            "aggs": {
+                "by_committer": {"cardinality": {"field": "committer_email"}},
+                "by_author": {"cardinality": {"field": "author_email"}},
+            },
         }
+    }
 
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="code_commit",
-            size = 0,
-            body = query
-        )
+        index=session.DB.dbname, doc_type="code_commit", size=0, body=query
+    )
 
     timeseries = []
-    for bucket in res['aggregations']['per_interval']['buckets']:
-        ts = int(bucket['key'] / 1000)
-        ccount = bucket['by_committer']['value']
-        acount = bucket['by_author']['value']
-        timeseries.append({
-            'date': ts,
-            'committers': ccount,
-            'authors': acount
-        })
+    for bucket in res["aggregations"]["per_interval"]["buckets"]:
+        ts = int(bucket["key"] / 1000)
+        ccount = bucket["by_committer"]["value"]
+        acount = bucket["by_author"]["value"]
+        timeseries.append({"date": ts, "committers": ccount, "authors": acount})
 
     JSON_OUT = {
-        'topN': {
-            'denoter': 'commits',
-            'items': topN
-        },
-        'timeseries': timeseries,
-        'sorted': people,
-        'okay': True,
-        'responseTime': time.time() - now,
-        'widgetType': {
-            'chartType': 'bar'
-        }
+        "topN": {"denoter": "commits", "items": topN},
+        "timeseries": timeseries,
+        "sorted": people,
+        "okay": True,
+        "responseTime": time.time() - now,
+        "widgetType": {"chartType": "bar"},
     }
     yield json.dumps(JSON_OUT)
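
committers.py looks up each person document by a SHA1 of organisation plus email and derives the gravatar from an MD5 of the stored address. Those two derivations in isolation, using the same scheme as the module with hypothetical inputs:

    import hashlib

    def person_id(org, email):
        # Document id for the "person" doc_type: sha1(org + email).
        return hashlib.sha1(("%s%s" % (org, email)).encode("utf-8")).hexdigest()

    def gravatar_hash(email):
        # committers.py hashes the stored address as-is (Gravatar itself
        # expects a lowercased, trimmed address).
        return hashlib.md5(email.encode("utf-8")).hexdigest()

    print(person_id("apache", "dev@example.org"))
    print(gravatar_hash("dev@example.org"))
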
diff --git a/kibble/api/pages/code/evolution.py b/kibble/api/pages/code/evolution.py
index dd33c12..0fa9cdc 100644
--- a/kibble/api/pages/code/evolution.py
+++ b/kibble/api/pages/code/evolution.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -62,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the TopN committers list renderer for Kibble
 """
@@ -73,6 +69,7 @@ import json
 import time
 import hashlib
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -83,94 +80,80 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
 
     breakdown = False
     onlycode = False
 
-
     ####################################################################
     ####################################################################
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'time': {
-                                        'from': 0,
-                                        'to': int(time.time())
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"time": {"from": 0, "to": int(time.time())}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        }
+    }
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
 
     # We need scrolling here!
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="evolution",
-            scroll = '2m',
-            size = 5000,
-            body = query
-        )
-    sid = res['_scroll_id']
-    scroll_size = res['hits']['total']
+        index=session.DB.dbname,
+        doc_type="evolution",
+        scroll="2m",
+        size=5000,
+        body=query,
+    )
+    sid = res["_scroll_id"]
+    scroll_size = res["hits"]["total"]
     if type(scroll_size) is dict:
-        scroll_size = scroll_size['value'] # ES >= 7.x
+        scroll_size = scroll_size["value"]  # ES >= 7.x
 
     timeseries = []
     tstmp = {}
 
-    while (scroll_size > 0):
-        for doc in res['hits']['hits']:
-            updates = doc['_source']
-            ts = updates['time'] #round(updates['time']/86400) * 86400
-            if updates['time'] % 86400 != 0:
+    while scroll_size > 0:
+        for doc in res["hits"]["hits"]:
+            updates = doc["_source"]
+            ts = updates["time"]  # round(updates['time']/86400) * 86400
+            if updates["time"] % 86400 != 0:
                 continue
             tstmp[ts] = tstmp.get(ts, {})
             item = tstmp[ts]
             if breakdown:
                 pass
             else:
-                item['code'] = item.get('code', 0) + (updates['loc'] or 0)
-                item['comments'] = item.get('comments', 0) + (updates['comments'] or 0)
-                item['blanks'] = item.get('blanks', 0) + (updates['blank'] or 0)
+                item["code"] = item.get("code", 0) + (updates["loc"] or 0)
+                item["comments"] = item.get("comments", 0) + (updates["comments"] or 0)
+                item["blanks"] = item.get("blanks", 0) + (updates["blank"] or 0)
 
-        res = session.DB.ES.scroll(scroll_id = sid, scroll = '1m')
-        sid = res['_scroll_id']
-        scroll_size = len(res['hits']['hits'])
+        res = session.DB.ES.scroll(scroll_id=sid, scroll="1m")
+        sid = res["_scroll_id"]
+        scroll_size = len(res["hits"]["hits"])
 
     for k, v in tstmp.items():
-        v['date'] = k
+        v["date"] = k
         timeseries.append(v)
 
-    timeseries = sorted(timeseries, key = lambda x: x['date'])
+    timeseries = sorted(timeseries, key=lambda x: x["date"])
     JSON_OUT = {
-        'widgetType': {
-            'chartType': 'line',  # Recommendation for the UI
-            'stack': True
-        },
-        'timeseries': timeseries,
-        'sortOrder': ['code', 'comments', 'blanks'],
-        'okay': True,
-        'responseTime': time.time() - now
+        "widgetType": {"chartType": "line", "stack": True},  # Recommendation for the UI
+        "timeseries": timeseries,
+        "sortOrder": ["code", "comments", "blanks"],
+        "okay": True,
+        "responseTime": time.time() - now,
     }
     yield json.dumps(JSON_OUT)
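
The scroll loop above is the standard elasticsearch-py pattern for draining a result set larger than one page. A standalone sketch under the same assumptions (ES 6.x/7.x client); the index name is illustrative and the per-document work is stubbed out:

    from elasticsearch import Elasticsearch

    es = Elasticsearch()
    res = es.search(index="kibble", scroll="2m", size=5000,
                    body={"query": {"match_all": {}}})
    sid = res["_scroll_id"]
    scroll_size = res["hits"]["total"]
    if isinstance(scroll_size, dict):  # ES >= 7.x wraps the total in an object
        scroll_size = scroll_size["value"]
    seen = 0
    while scroll_size > 0:
        for doc in res["hits"]["hits"]:
            seen += 1  # stand-in for real per-document processing
        res = es.scroll(scroll_id=sid, scroll="1m")
        sid = res["_scroll_id"]
        scroll_size = len(res["hits"]["hits"])  # 0 once the scroll is drained
    es.clear_scroll(scroll_id=sid)  # frees the server-side context; the page code omits this
    print(seen)
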
diff --git a/kibble/api/pages/code/pony-timeseries.py b/kibble/api/pages/code/pony-timeseries.py
index 2a27c00..8e1d254 100644
--- a/kibble/api/pages/code/pony-timeseries.py
+++ b/kibble/api/pages/code/pony-timeseries.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -62,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the pony factor renderer for Kibble
 """
@@ -75,6 +71,7 @@ import re
 import datetime
 import dateutil.relativedelta
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -85,13 +82,12 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
 
-    hl = indata.get('span', 24)
+    hl = indata.get("span", 24)
     tnow = datetime.date.today()
     nm = tnow.month - (tnow.month % 3)
     ny = tnow.year
@@ -111,107 +107,84 @@ def run(API, environ, indata, session):
             nm += 12
             ny = ny - 1
 
-
         ####################################################################
         ####################################################################
-        dOrg = session.user['defaultOrganisation'] or "apache"
+        dOrg = session.user["defaultOrganisation"] or "apache"
         query = {
-                    'query': {
-                        'bool': {
-                            'must': [
-                                {'range':
-                                    {
-                                        'tsday': {
-                                            'from': tf,
-                                            'to': t
-                                        }
-                                    }
-                                },
-                                {
-                                    'term': {
-                                        'organisation': dOrg
-                                    }
-                                }
-                            ]
-                        }
-                    }
+            "query": {
+                "bool": {
+                    "must": [
+                        {"range": {"tsday": {"from": tf, "to": t}}},
+                        {"term": {"organisation": dOrg}},
+                    ]
                 }
+            }
+        }
         # Source-specific or view-specific??
-        if indata.get('source'):
-            query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+        if indata.get("source"):
+            query["query"]["bool"]["must"].append(
+                {"term": {"sourceID": indata.get("source")}}
+            )
         elif viewList:
-            query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
+            query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
 
         # Get an initial count of commits
         res = session.DB.ES.count(
-                index=session.DB.dbname,
-                doc_type="code_commit",
-                body = query
-            )
+            index=session.DB.dbname, doc_type="code_commit", body=query
+        )
 
-        globcount = res['count']
+        globcount = res["count"]
         if globcount == 0:
             break
 
         # Get the top committers and authors this period
-        query['aggs'] = {
-                'by_committer': {
-                    'terms': {
-                        'field': 'committer_email',
-                        'size': 1000
-                    }
-                },
-                'by_author': {
-                    'terms': {
-                        'field': 'author_email',
-                        'size': 1000
-                    }
-                }
-            }
+        query["aggs"] = {
+            "by_committer": {"terms": {"field": "committer_email", "size": 1000}},
+            "by_author": {"terms": {"field": "author_email", "size": 1000}},
+        }
         res = session.DB.ES.search(
-                index=session.DB.dbname,
-                doc_type="code_commit",
-                size = 0,
-                body = query
-            )
-
+            index=session.DB.dbname, doc_type="code_commit", size=0, body=query
+        )
 
         # PF for committers
         pf_committer = 0
         pf_committer_count = 0
-        for bucket in res['aggregations']['by_committer']['buckets']:
-            count = bucket['doc_count']
+        for bucket in res["aggregations"]["by_committer"]["buckets"]:
+            count = bucket["doc_count"]
             pf_committer += 1
             pf_committer_count += count
-            if pf_committer_count > int(globcount/2):
+            if pf_committer_count > int(globcount / 2):
                 break
 
         # PF for authors
         pf_author = 0
         pf_author_count = 0
         cpf = {}
-        for bucket in res['aggregations']['by_author']['buckets']:
-            count = bucket['doc_count']
+        for bucket in res["aggregations"]["by_author"]["buckets"]:
+            count = bucket["doc_count"]
             pf_author += 1
             pf_author_count += count
-            if '@' in bucket['key']:
-                mldom = bucket['key'].lower().split('@')[-1]
+            if "@" in bucket["key"]:
+                mldom = bucket["key"].lower().split("@")[-1]
                 cpf[mldom] = True
-            if pf_author_count > int(globcount/2):
+            if pf_author_count > int(globcount / 2):
                 break
-        ts.append({
-            'date': t,
-            'Pony Factor (committership)': pf_committer,
-            'Pony Factor (authorship)': pf_author,
-            'Meta-Pony Factor': len(cpf)
-        })
+        ts.append(
+            {
+                "date": t,
+                "Pony Factor (committership)": pf_committer,
+                "Pony Factor (authorship)": pf_author,
+                "Meta-Pony Factor": len(cpf),
+            }
+        )
 
-    ts = sorted(ts, key = lambda x: x['date'])
+    ts = sorted(ts, key=lambda x: x["date"])
 
     JSON_OUT = {
-        'text': "This shows Pony Factors as calculated over a %u month timespan. Authorship measures the people writing the bulk of the codebase, committership mesaures the people committing (merging) the code, and meta-pony is an estimation of how many organisations/companies are involved." % hl,
-        'timeseries': ts,
-        'okay': True,
-        'responseTime': time.time() - now,
+        "text": "This shows Pony Factors as calculated over a %u month timespan. Authorship measures the people writing the bulk of the codebase, committership mesaures the people committing (merging) the code, and meta-pony is an estimation of how many organisations/companies are involved."
+        % hl,
+        "timeseries": ts,
+        "okay": True,
+        "responseTime": time.time() - now,
     }
     yield json.dumps(JSON_OUT)
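
Stripped of the Elasticsearch plumbing, the pony factor computed in this file reduces to: sort per-person commit counts descending and count how many people it takes to exceed half of all commits. A minimal pure-Python sketch of that reduction:

    def pony_factor(counts):
        """counts: per-person commit counts, in any order."""
        total = sum(counts)
        running = 0
        for people, c in enumerate(sorted(counts, reverse=True), start=1):
            running += c
            if running > total // 2:  # same cutoff as int(globcount / 2) above
                return people
        return 0

    assert pony_factor([60, 20, 10, 10]) == 1  # one person owns over half the commits
    assert pony_factor([30, 30, 20, 20]) == 2
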
diff --git a/kibble/api/pages/code/pony.py b/kibble/api/pages/code/pony.py
index 895a17a..2c5b48d 100644
--- a/kibble/api/pages/code/pony.py
+++ b/kibble/api/pages/code/pony.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -62,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the pony factor renderer for Kibble
 """
@@ -73,6 +69,7 @@ import json
 import time
 import re
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -83,209 +80,158 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
-
-    dateTo = indata.get('to', int(time.time()))
-    dateFrom = indata.get('from', dateTo - (86400*30*24)) # Default to a 24 month span
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
+
+    dateTo = indata.get("to", int(time.time()))
+    dateFrom = indata.get(
+        "from", dateTo - (86400 * 30 * 24)
+    )  # Default to a 24 month span
     if dateFrom < 0:
         dateFrom = 0
     dateYonder = dateFrom - (dateTo - dateFrom)
 
-
     ####################################################################
     ####################################################################
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'tsday': {
-                                        'from': dateFrom,
-                                        'to': dateTo
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"tsday": {"from": dateFrom, "to": dateTo}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        }
+    }
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
 
     # Get an initial count of commits
     res = session.DB.ES.count(
-            index=session.DB.dbname,
-            doc_type="code_commit",
-            body = query
-        )
+        index=session.DB.dbname, doc_type="code_commit", body=query
+    )
 
-    globcount = res['count']
+    globcount = res["count"]
 
     # Get the top committers and authors this period
-    query['aggs'] = {
-            'by_committer': {
-                'terms': {
-                    'field': 'committer_email',
-                    'size': 5000
-                }
-            },
-            'by_author': {
-                'terms': {
-                    'field': 'author_email',
-                    'size': 5000
-                }
-            }
-        }
+    query["aggs"] = {
+        "by_committer": {"terms": {"field": "committer_email", "size": 5000}},
+        "by_author": {"terms": {"field": "author_email", "size": 5000}},
+    }
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="code_commit",
-            size = 0,
-            body = query
-        )
-
+        index=session.DB.dbname, doc_type="code_commit", size=0, body=query
+    )
 
     # PF for committers
     pf_committer = 0
     pf_committer_count = 0
-    for bucket in res['aggregations']['by_committer']['buckets']:
-        count = bucket['doc_count']
+    for bucket in res["aggregations"]["by_committer"]["buckets"]:
+        count = bucket["doc_count"]
         pf_committer += 1
         pf_committer_count += count
-        if pf_committer_count > int(globcount/2):
+        if pf_committer_count > int(globcount / 2):
             break
 
     # PF for authors
     pf_author = 0
     pf_author_count = 0
     cpf = {}
-    for bucket in res['aggregations']['by_author']['buckets']:
-        count = bucket['doc_count']
+    for bucket in res["aggregations"]["by_author"]["buckets"]:
+        count = bucket["doc_count"]
         pf_author += 1
         pf_author_count += count
-        mldom = bucket['key'].lower().split('@')[1]
+        mldom = bucket["key"].lower().split("@")[1]
         cpf[mldom] = True
-        if pf_author_count > int(globcount/2):
+        if pf_author_count > int(globcount / 2):
             break
 
-
     ####################################################################
     ####################################################################
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'tsday': {
-                                        'from': dateYonder,
-                                        'to': dateFrom-1
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"tsday": {"from": dateYonder, "to": dateFrom - 1}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        }
+    }
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
 
     # Get an initial count of commits
     res = session.DB.ES.count(
-            index=session.DB.dbname,
-            doc_type="code_commit",
-            body = query
-        )
+        index=session.DB.dbname, doc_type="code_commit", body=query
+    )
 
-    globcount = res['count']
+    globcount = res["count"]
 
     # Get the top committers and authors this period
-    query['aggs'] = {
-            'by_committer': {
-                'terms': {
-                    'field': 'committer_email',
-                    'size': 5000
-                }
-            },
-            'by_author': {
-                'terms': {
-                    'field': 'author_email',
-                    'size': 5000
-                }
-            }
-        }
+    query["aggs"] = {
+        "by_committer": {"terms": {"field": "committer_email", "size": 5000}},
+        "by_author": {"terms": {"field": "author_email", "size": 5000}},
+    }
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="code_commit",
-            size = 0,
-            body = query
-        )
-
+        index=session.DB.dbname, doc_type="code_commit", size=0, body=query
+    )
 
     # PF for committers
     pf_committer_b = 0
     pf_committer_count = 0
-    for bucket in res['aggregations']['by_committer']['buckets']:
-        count = bucket['doc_count']
+    for bucket in res["aggregations"]["by_committer"]["buckets"]:
+        count = bucket["doc_count"]
         pf_committer_b += 1
         pf_committer_count += count
-        if pf_committer_count > int(globcount/2):
+        if pf_committer_count > int(globcount / 2):
             break
 
     # PF for authors
     pf_author_b = 0
     pf_author_count = 0
     cpf_b = {}
-    for bucket in res['aggregations']['by_author']['buckets']:
-        count = bucket['doc_count']
+    for bucket in res["aggregations"]["by_author"]["buckets"]:
+        count = bucket["doc_count"]
         pf_author_b += 1
         pf_author_count += count
-        mldom = bucket['key'].lower().split('@')[1]
+        mldom = bucket["key"].lower().split("@")[1]
         cpf_b[mldom] = True
-        if pf_author_count > int(globcount/2):
+        if pf_author_count > int(globcount / 2):
             break
 
     JSON_OUT = {
-        'factors': [
+        "factors": [
             {
-                'title': "Pony Factor (by committership)",
-                'count': pf_committer,
-                'previous': pf_committer_b
+                "title": "Pony Factor (by committership)",
+                "count": pf_committer,
+                "previous": pf_committer_b,
             },
             {
-                'title': "Pony Factor (by authorship)",
-                'count': pf_author,
-                'previous': pf_author_b
+                "title": "Pony Factor (by authorship)",
+                "count": pf_author,
+                "previous": pf_author_b,
             },
             {
-                'title': "Meta-Pony Factor (by authorship)",
-                'count': len(cpf),
-                'previous': len(cpf_b)
-            }
+                "title": "Meta-Pony Factor (by authorship)",
+                "count": len(cpf),
+                "previous": len(cpf_b),
+            },
         ],
-        'okay': True,
-        'responseTime': time.time() - now,
+        "okay": True,
+        "responseTime": time.time() - now,
     }
     yield json.dumps(JSON_OUT)
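
One behavioural note on this file: unlike pony-timeseries.py above, which guards with `if "@" in bucket["key"]`, the meta-pony loops here call bucket["key"].lower().split("@")[1] unconditionally, so an author key without an "@" (e.g. a bare username) would raise IndexError. A defensive variant, in case that ever bites:

    def email_domain(addr):
        """Return the lowercased domain of an email-like key, or None."""
        addr = addr.lower()
        return addr.split("@", 1)[1] if "@" in addr else None

    assert email_domain("Jane@Example.ORG") == "example.org"
    assert email_domain("root") is None
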
diff --git a/kibble/api/pages/code/punchcard.py b/kibble/api/pages/code/punchcard.py
index babbaeb..f588b3b 100644
--- a/kibble/api/pages/code/punchcard.py
+++ b/kibble/api/pages/code/punchcard.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -62,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the commit punch-card renderer for Kibble
 """
@@ -73,6 +69,7 @@ import json
 import time
 import hashlib
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -83,97 +80,84 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
-
-    dateTo = indata.get('to', int(time.time()))
-    dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
 
-    which = 'committer_email'
-    role = 'committer'
-    if indata.get('author', False):
-        which = 'author_email'
-        role = 'author'
+    dateTo = indata.get("to", int(time.time()))
+    dateFrom = indata.get(
+        "from", dateTo - (86400 * 30 * 6)
+    )  # Default to a 6 month span
 
-    interval = indata.get('interval', 'day')
+    which = "committer_email"
+    role = "committer"
+    if indata.get("author", False):
+        which = "author_email"
+        role = "author"
 
+    interval = indata.get("interval", "day")
 
     ####################################################################
     ####################################################################
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'tsday': {
-                                        'from': dateFrom,
-                                        'to': dateTo
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"tsday": {"from": dateFrom, "to": dateTo}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        }
+    }
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-    if indata.get('email'):
-        query['query']['bool']['should'] = [{'term': {'committer_email': indata.get('email')}}, {'term': {'author_email': indata.get('email')}}]
-        query['query']['bool']['minimum_should_match'] = 1
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
+    if indata.get("email"):
+        query["query"]["bool"]["should"] = [
+            {"term": {"committer_email": indata.get("email")}},
+            {"term": {"author_email": indata.get("email")}},
+        ]
+        query["query"]["bool"]["minimum_should_match"] = 1
 
     # Path filter?
-    if indata.get('pathfilter'):
-        pf = indata.get('pathfilter')
-        if '!' in pf:
-            pf = pf.replace('!', '')
-            query['query']['bool']['must_not'] = query['query']['bool'].get('must_not', [])
-            query['query']['bool']['must_not'].append({'regexp': {'files_changed': pf}})
+    if indata.get("pathfilter"):
+        pf = indata.get("pathfilter")
+        if "!" in pf:
+            pf = pf.replace("!", "")
+            query["query"]["bool"]["must_not"] = query["query"]["bool"].get(
+                "must_not", []
+            )
+            query["query"]["bool"]["must_not"].append({"regexp": {"files_changed": pf}})
         else:
-            query['query']['bool']['must'].append({'regexp': {'files_changed': pf}})
+            query["query"]["bool"]["must"].append({"regexp": {"files_changed": pf}})
 
     # Get commit counts per weekday/hour, this period
-    query['aggs'] = {
-            'commits': {
-                'date_histogram': {
-                    'field': 'date',
-                    'interval': 'hour',
-                    "format": "E - k"
-                }
-            }
+    query["aggs"] = {
+        "commits": {
+            "date_histogram": {"field": "date", "interval": "hour", "format": "E - k"}
         }
+    }
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="code_commit",
-            size = 0,
-            body = query
-        )
+        index=session.DB.dbname, doc_type="code_commit", size=0, body=query
+    )
 
     timeseries = {}
-    for bucket in res['aggregations']['commits']['buckets']:
-        ts = bucket['key_as_string']
-        count = bucket['doc_count']
+    for bucket in res["aggregations"]["commits"]["buckets"]:
+        ts = bucket["key_as_string"]
+        count = bucket["doc_count"]
         timeseries[ts] = timeseries.get(ts, 0) + count
 
     JSON_OUT = {
-        'widgetType': {
-            'chartType': 'punchcard'  # Recommendation for the UI
-        },
-        'timeseries': timeseries,
-        'interval': interval,
-        'okay': True,
-        'responseTime': time.time() - now
+        "widgetType": {"chartType": "punchcard"},  # Recommendation for the UI
+        "timeseries": timeseries,
+        "interval": interval,
+        "okay": True,
+        "responseTime": time.time() - now,
     }
     yield json.dumps(JSON_OUT)
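
The trick in the punchcard aggregation above is that the histogram runs at hourly interval while the Joda format "E - k" (weekday name, clock-hour 1-24) renders every week onto the same set of keys, so summing doc_count per key_as_string folds months of history into one weekday/hour grid. The same fold with synthetic buckets:

    buckets = [
        {"key_as_string": "Mon - 10", "doc_count": 4},  # two different Mondays...
        {"key_as_string": "Mon - 10", "doc_count": 2},  # ...land on the same key
        {"key_as_string": "Fri - 23", "doc_count": 1},
    ]
    timeseries = {}
    for bucket in buckets:
        ts = bucket["key_as_string"]
        timeseries[ts] = timeseries.get(ts, 0) + bucket["doc_count"]
    assert timeseries == {"Mon - 10": 6, "Fri - 23": 1}
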
diff --git a/kibble/api/pages/code/relationships.py b/kibble/api/pages/code/relationships.py
index f722b7c..43b1a9e 100644
--- a/kibble/api/pages/code/relationships.py
+++ b/kibble/api/pages/code/relationships.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -62,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the committer relationship list renderer for Kibble
 """
@@ -76,6 +72,7 @@ import copy
 import re
 import math
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -86,71 +83,61 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
 
-    dateTo = indata.get('to', int(time.time()))
-    dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span
+    dateTo = indata.get("to", int(time.time()))
+    dateFrom = indata.get(
+        "from", dateTo - (86400 * 30 * 6)
+    )  # Default to a 6 month span
 
-    which = 'committer_email'
-    role = 'committer'
-    if indata.get('author', False):
-        which = 'author_email'
-        role = 'author'
-
-    interval = indata.get('interval', 'day')
+    which = "committer_email"
+    role = "committer"
+    if indata.get("author", False):
+        which = "author_email"
+        role = "author"
 
+    interval = indata.get("interval", "day")
 
     ####################################################################
     ####################################################################
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'tsday': {
-                                        'from': dateFrom,
-                                        'to': dateTo
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"tsday": {"from": dateFrom, "to": dateTo}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        }
+    }
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-    if indata.get('email'):
-        query['query']['bool']['must'].append({'term': {'committer_email' if not indata.get('author') else 'author_email': indata.get('email')}})
-
-    # Get number of commits, this period, per repo
-    query['aggs'] = {
-            'per_repo': {
-                'terms': {
-                    'field': 'sourceID',
-                    'size': 10000
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
+    if indata.get("email"):
+        query["query"]["bool"]["must"].append(
+            {
+                "term": {
+                    "committer_email"
+                    if not indata.get("author")
+                    else "author_email": indata.get("email")
                 }
             }
-        }
-    res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="code_commit",
-            size = 0,
-            body = query
         )
 
+    # Get number of commits, this period, per repo
+    query["aggs"] = {"per_repo": {"terms": {"field": "sourceID", "size": 10000}}}
+    res = session.DB.ES.search(
+        index=session.DB.dbname, doc_type="code_commit", size=0, body=query
+    )
+
     repos = {}
     repo_commits = {}
     authorlinks = {}
@@ -158,33 +145,32 @@ def run(API, environ, indata, session):
     max_links = 0
     max_shared = 0
     max_authors = 0
-    minLinks = indata.get('links', 1)
+    minLinks = indata.get("links", 1)
 
     # For each repo, count commits and gather data on authors
-    for doc in res['aggregations']['per_repo']['buckets']:
-        sourceID = doc['key']
-        commits = doc['doc_count']
+    for doc in res["aggregations"]["per_repo"]["buckets"]:
+        sourceID = doc["key"]
+        commits = doc["doc_count"]
 
         # Gather the unique authors/committers
-        query['aggs'] = {
-            'per_contributor': {
-                'terms': {
-                    'field': 'committer_email' if not indata.get('author') else 'author_email',
-                    'size': 10000
+        query["aggs"] = {
+            "per_contributor": {
+                "terms": {
+                    "field": "committer_email"
+                    if not indata.get("author")
+                    else "author_email",
+                    "size": 10000,
                 }
             }
         }
         xquery = copy.deepcopy(query)
-        xquery['query']['bool']['must'].append({'term': {'sourceID': sourceID}})
+        xquery["query"]["bool"]["must"].append({"term": {"sourceID": sourceID}})
         xres = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="code_commit",
-            size = 0,
-            body = xquery
+            index=session.DB.dbname, doc_type="code_commit", size=0, body=xquery
         )
         authors = []
-        for person in xres['aggregations']['per_contributor']['buckets']:
-            authors.append(person['key'])
+        for person in xres["aggregations"]["per_contributor"]["buckets"]:
+            authors.append(person["key"])
         if commits > max_commits:
             max_commits = commits
         repos[sourceID] = authors
@@ -199,9 +185,11 @@ def run(API, environ, indata, session):
     # Grab data of all sources
     for ID, repo in repos.items():
         mylinks = {}
-        if not session.DB.ES.exists(index=session.DB.dbname, doc_type="source", id = ID):
+        if not session.DB.ES.exists(index=session.DB.dbname, doc_type="source", id=ID):
             continue
-        repodatas[ID] = session.DB.ES.get(index=session.DB.dbname, doc_type="source", id = ID)
+        repodatas[ID] = session.DB.ES.get(
+            index=session.DB.dbname, doc_type="source", id=ID
+        )
 
     for ID, repo in repos.items():
         mylinks = {}
@@ -209,49 +197,59 @@ def run(API, environ, indata, session):
             continue
         repodata = repodatas[ID]
         oID = ID
-        if indata.get('collapse'):
-            m = re.search(indata.get('collapse'), repodata['_source']['sourceURL'])
+        if indata.get("collapse"):
+            m = re.search(indata.get("collapse"), repodata["_source"]["sourceURL"])
             if m:
                 ID = m.group(1)
         else:
-            ID = re.sub(r"^.+/", "", repodata['_source']['sourceURL'])
+            ID = re.sub(r"^.+/", "", repodata["_source"]["sourceURL"])
         for xID, xrepo in repos.items():
             if xID in repodatas:
                 xrepodata = repodatas[xID]
-                if indata.get('collapse'):
-                    m = re.search(indata.get('collapse'), xrepodata['_source']['sourceURL'])
+                if indata.get("collapse"):
+                    m = re.search(
+                        indata.get("collapse"), xrepodata["_source"]["sourceURL"]
+                    )
                     if m:
                         xID = m.group(1)
                 else:
-                    xID = re.sub(r"^.+/", "", xrepodata['_source']['sourceURL'])
+                    xID = re.sub(r"^.+/", "", xrepodata["_source"]["sourceURL"])
                 if xID != ID:
                     xlinks = []
                     for author in xrepo:
                         if author in repo:
                             xlinks.append(author)
-                    lname = "%s@%s" % (ID, xID) # Link name
-                    rname = "%s@%s" % (xID, ID) # Reverse link name
+                    lname = "%s@%s" % (ID, xID)  # Link name
+                    rname = "%s@%s" % (xID, ID)  # Reverse link name
                     if len(xlinks) >= minLinks and not rname in repo_links:
                         mylinks[xID] = len(xlinks)
-                        repo_links[lname] = repo_links.get(lname, 0) + len(xlinks) # How many contributors in common between project A and B?
+                        repo_links[lname] = repo_links.get(lname, 0) + len(
+                            xlinks
+                        )  # How many contributors in common between project A and B?
                         if repo_links[lname] > max_shared:
                             max_shared = repo_links[lname]
         if ID not in repo_notoriety:
             repo_notoriety[ID] = set()
-        repo_notoriety[ID].update(mylinks.keys()) # How many projects is this repo connected to?
+        repo_notoriety[ID].update(
+            mylinks.keys()
+        )  # How many projects is this repo connected to?
 
         if ID not in repo_authors:
             repo_authors[ID] = set()
-        repo_authors[ID].update(repo) # How many projects is this repo connected to?
+        repo_authors[ID].update(repo)  # The set of unique contributors to this repo
 
         if ID != oID:
             repo_commits[ID] = repo_commits.get(ID, 0) + repo_commits[oID]
             if repo_commits[ID] > max_commits:
-                max_commits = repo_commits[ID] # Used for calculating max link thickness
+                max_commits = repo_commits[
+                    ID
+                ]  # Used for calculating max link thickness
         if len(repo_notoriety[ID]) > max_links:
             max_links = len(repo_notoriety[ID])
         if len(repo_authors[ID]) > max_authors:
-            max_authors = len(repo_authors[ID]) # Used for calculating max sphere size in charts
+            max_authors = len(
+                repo_authors[ID]
+            )  # Used for calculating max sphere size in charts
 
     # Now, pull it all together!
     nodes = []
@@ -260,45 +258,44 @@ def run(API, environ, indata, session):
     for sourceID in repo_notoriety.keys():
         lsize = 0
         for k in repo_links.keys():
-            fr, to = k.split('@')
+            fr, to = k.split("@")
             if fr == sourceID or to == sourceID:
                 lsize += 1
         asize = len(repo_authors[sourceID])
         doc = {
-            'id': sourceID,
-            'name': sourceID,
-            'commits': repo_commits[sourceID],
-            'authors': asize,
-            'links': lsize,
-            'size': max(5, (1 - abs(math.log10(asize / max_authors))) * 45),
-            'tooltip': "%u connections, %u contributors, %u commits" % (lsize, asize, repo_commits[sourceID])
+            "id": sourceID,
+            "name": sourceID,
+            "commits": repo_commits[sourceID],
+            "authors": asize,
+            "links": lsize,
+            "size": max(5, (1 - abs(math.log10(asize / max_authors))) * 45),
+            "tooltip": "%u connections, %u contributors, %u commits"
+            % (lsize, asize, repo_commits[sourceID]),
         }
         nodes.append(doc)
         existing_repos.append(sourceID)
 
     for k, s in repo_links.items():
         size = s
-        fr, to = k.split('@')
+        fr, to = k.split("@")
         if fr in existing_repos and to in existing_repos:
             doc = {
-                'source': fr,
-                'target': to,
-                'value': max(1, (size/max_shared) * 8),
-                'name': "%s &#8596; %s" % (fr, to),
-                'tooltip': "%u committers in common" % size
+                "source": fr,
+                "target": to,
+                "value": max(1, (size / max_shared) * 8),
+                "name": "%s &#8596; %s" % (fr, to),
+                "tooltip": "%u committers in common" % size,
             }
             links.append(doc)
 
     JSON_OUT = {
-        'maxLinks': max_links,
-        'maxShared': max_shared,
-        'widgetType': {
-            'chartType': 'link'  # Recommendation for the UI
-        },
-        'links': links,
-        'nodes': nodes,
-        'interval': interval,
-        'okay': True,
-        'responseTime': time.time() - now
+        "maxLinks": max_links,
+        "maxShared": max_shared,
+        "widgetType": {"chartType": "link"},  # Recommendation for the UI
+        "links": links,
+        "nodes": nodes,
+        "interval": interval,
+        "okay": True,
+        "responseTime": time.time() - now,
     }
     yield json.dumps(JSON_OUT)
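
The node sizing above compresses contributor counts onto a 5..45 range on a log10 scale relative to the busiest repo, so anything a full order of magnitude below the maximum bottoms out at the minimum size. Isolated for inspection:

    import math

    def node_size(asize, max_authors):
        return max(5, (1 - abs(math.log10(asize / max_authors))) * 45)

    print(node_size(100, 100))  # 45.0 - the largest repo gets the full size
    print(node_size(50, 100))   # ~31.5
    print(node_size(10, 100))   # 5 - one decade down hits the floor
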
diff --git a/kibble/api/pages/code/retention.py b/kibble/api/pages/code/retention.py
index 31debdc..0ae308a 100644
--- a/kibble/api/pages/code/retention.py
+++ b/kibble/api/pages/code/retention.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -62,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the code contributor retention factor renderer for Kibble
 """
@@ -74,6 +70,7 @@ import time
 import re
 import datetime
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -84,13 +81,14 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
-
-    hl = indata.get('span', 12) # By default, we define a contributor as active if having committer in the past year
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
+
+    hl = indata.get(
+        "span", 12
+    )  # By default, we define a contributor as active if they have committed in the past year
     tnow = datetime.date.today()
     nm = tnow.month - (tnow.month % 3)
     ny = tnow.year
@@ -107,7 +105,7 @@ def run(API, environ, indata, session):
     FoundSomething = False
 
     ny = 1970
-    while ny < cy or (ny == cy and (nm+3) <= tnow.month):
+    while ny < cy or (ny == cy and (nm + 3) <= tnow.month):
         d = datetime.date(ny, nm, 1)
         t = time.mktime(d.timetuple())
         nm += 3
@@ -121,76 +119,51 @@ def run(API, environ, indata, session):
 
         ####################################################################
         ####################################################################
-        dOrg = session.user['defaultOrganisation'] or "apache"
+        dOrg = session.user["defaultOrganisation"] or "apache"
         query = {
-                    'query': {
-                        'bool': {
-                            'must': [
-                                {'range':
-                                    {
-                                        'tsday': {
-                                            'from': t,
-                                            'to': tf
-                                        }
-                                    }
-                                },
-                                {
-                                    'term': {
-                                        'organisation': dOrg
-                                    }
-                                }
-                            ]
-                        }
-                    }
+            "query": {
+                "bool": {
+                    "must": [
+                        {"range": {"tsday": {"from": t, "to": tf}}},
+                        {"term": {"organisation": dOrg}},
+                    ]
                 }
+            }
+        }
         # Source-specific or view-specific??
-        if indata.get('source'):
-            query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+        if indata.get("source"):
+            query["query"]["bool"]["must"].append(
+                {"term": {"sourceID": indata.get("source")}}
+            )
         elif viewList:
-            query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
+            query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
 
         # Get an initial count of commits
         res = session.DB.ES.count(
-                index=session.DB.dbname,
-                doc_type="code_commit",
-                body = query
-            )
+            index=session.DB.dbname, doc_type="code_commit", body=query
+        )
 
-        globcount = res['count']
+        globcount = res["count"]
         if globcount == 0 and not FoundSomething:
             continue
         FoundSomething = True
 
         # Get the top committers and authors this period
-        query['aggs'] = {
-                'by_committer': {
-                    'terms': {
-                        'field': 'committer_email',
-                        'size': 25000
-                    }
-                },
-                'by_author': {
-                    'terms': {
-                        'field': 'author_email',
-                        'size': 25000
-                    }
-                }
-            }
+        query["aggs"] = {
+            "by_committer": {"terms": {"field": "committer_email", "size": 25000}},
+            "by_author": {"terms": {"field": "author_email", "size": 25000}},
+        }
         res = session.DB.ES.search(
-                index=session.DB.dbname,
-                doc_type="code_commit",
-                size = 0,
-                body = query
-            )
-
+            index=session.DB.dbname, doc_type="code_commit", size=0, body=query
+        )
 
         retained = 0
         added = 0
         lost = 0
 
         thisPeriod = []
-        for bucket in res['aggregations']['by_author']['buckets']:
-            who = bucket['key']
+        for bucket in res["aggregations"]["by_author"]["buckets"]:
+            who = bucket["key"]
             thisPeriod.append(who)
             if who not in peopleSeen:
                 peopleSeen[who] = tf
@@ -201,7 +174,7 @@ def run(API, environ, indata, session):
 
         prune = []
         for k, v in activePeople.items():
-            if v < (t - (hl*30.45*86400)):
+            if v < (t - (hl * 30.45 * 86400)):
                 prune.append(k)
                 lost += 1
 
@@ -210,45 +183,48 @@ def run(API, environ, indata, session):
             del peopleSeen[who]
         retained = len(activePeople) - added
 
-        ts.append({
-            'date': tf,
-            'People who (re)joined': added,
-            'People who quit': lost,
-            'People retained': retained,
-            'Active people': added + retained
-        })
+        ts.append(
+            {
+                "date": tf,
+                "People who (re)joined": added,
+                "People who quit": lost,
+                "People retained": retained,
+                "Active people": added + retained,
+            }
+        )
 
     groups = [
-        ['More than 5 years', (5*365*86400)+1],
-        ['2 - 5 years', (2*365*86400)+1],
-        ['1 - 2 years', (365*86400)],
-        ['Less than a year', 1]
+        ["More than 5 years", (5 * 365 * 86400) + 1],
+        ["2 - 5 years", (2 * 365 * 86400) + 1],
+        ["1 - 2 years", (365 * 86400)],
+        ["Less than a year", 1],
     ]
 
     counts = {}
     totExp = 0
     for person, age in activePeople.items():
         totExp += time.time() - allPeople[person]
-        for el in sorted(groups, key = lambda x: x[1], reverse = True):
+        for el in sorted(groups, key=lambda x: x[1], reverse=True):
             if allPeople[person] <= time.time() - el[1]:
                 counts[el[0]] = counts.get(el[0], 0) + 1
                 break
-    avgyr = (totExp / (86400*365)) / max(len(activePeople),1)
+    avgyr = (totExp / (86400 * 365)) / max(len(activePeople), 1)
 
-    ts = sorted(ts, key = lambda x: x['date'])
+    ts = sorted(ts, key=lambda x: x["date"])
     avgm = ""
     yr = int(avgyr)
-    ym = round((avgyr-yr)*12)
+    ym = round((avgyr - yr) * 12)
     if yr >= 1:
         avgm += "%u year%s" % (yr, "s" if yr != 1 else "")
     if ym > 0:
         avgm += "%s%u month%s" % (", " if yr > 0 else "", ym, "s" if ym != 1 else "")
     JSON_OUT = {
-        'text': "This shows Contributor retention as calculated over a %u month timespan. The average experience of currently active people is %s." % (hl, avgm),
-        'timeseries': ts,
-        'counts': counts,
-        'averageYears': avgyr,
-        'okay': True,
-        'responseTime': time.time() - now,
+        "text": "This shows Contributor retention as calculated over a %u month timespan. The average experience of currently active people is %s."
+        % (hl, avgm),
+        "timeseries": ts,
+        "counts": counts,
+        "averageYears": avgyr,
+        "okay": True,
+        "responseTime": time.time() - now,
     }
     yield json.dumps(JSON_OUT)
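
The tenure histogram near the end of this file walks the `groups` thresholds largest-first and drops each active person into the first bracket their first-seen timestamp clears. The same walk with hypothetical timestamps:

    import time

    groups = [
        ["More than 5 years", (5 * 365 * 86400) + 1],
        ["2 - 5 years", (2 * 365 * 86400) + 1],
        ["1 - 2 years", (365 * 86400)],
        ["Less than a year", 1],
    ]
    now = time.time()
    first_seen = {  # hypothetical first-commit timestamps
        "alice": now - 6 * 365 * 86400,
        "bob": now - 100 * 86400,
    }
    counts = {}
    for person, seen in first_seen.items():
        for label, age in sorted(groups, key=lambda x: x[1], reverse=True):
            if seen <= now - age:  # active for at least `age` seconds
                counts[label] = counts.get(label, 0) + 1
                break
    print(counts)  # {'More than 5 years': 1, 'Less than a year': 1}
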
diff --git a/kibble/api/pages/code/sloc.py b/kibble/api/pages/code/sloc.py
index 9709fc3..b221f95 100644
--- a/kibble/api/pages/code/sloc.py
+++ b/kibble/api/pages/code/sloc.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -62,80 +61,62 @@
 ########################################################################
 
 
-
-
-
 """
 This is the SLoC renderer for Kibble
 """
 
 import json
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
     if not session.user:
         raise API.exception(403, "You must be logged in to use this API endpoint!")
 
-
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
 
     # Fetch all sources for default org
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {
-                            'terms': {
-                                'type': ['git', 'svn', 'github']
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"terms": {"type": ["git", "svn", "github"]}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        }
+    }
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
 
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="source",
-            size = 5000,
-            body = query
-        )
+        index=session.DB.dbname, doc_type="source", size=5000, body=query
+    )
 
     languages = {}
     years = 0
-    for hit in res['hits']['hits']:
-        doc = hit['_source']
-        if 'sloc' in doc:
-            sloc = doc['sloc']
-            years += sloc['years']
-            for k, v in sloc['languages'].items():
-                languages[k] = languages.get(k, {'code': 0, 'comment': 0, 'blank': 0})
-                languages[k]['code'] += v.get('code', 0)
-                languages[k]['comment'] += v.get('comment', 0)
-                languages[k]['blank'] += v.get('blank', 0)
-
-
-    JSON_OUT = {
-        'languages': languages,
-        'okay': True,
-        'years': years
-    }
+    for hit in res["hits"]["hits"]:
+        doc = hit["_source"]
+        if "sloc" in doc:
+            sloc = doc["sloc"]
+            years += sloc["years"]
+            for k, v in sloc["languages"].items():
+                languages[k] = languages.get(k, {"code": 0, "comment": 0, "blank": 0})
+                languages[k]["code"] += v.get("code", 0)
+                languages[k]["comment"] += v.get("comment", 0)
+                languages[k]["blank"] += v.get("blank", 0)
+
+    JSON_OUT = {"languages": languages, "okay": True, "years": years}
     yield json.dumps(JSON_OUT)
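
The per-language merge in this file tolerates sources carrying only some of the cloc counters by defaulting code/comment/blank to zero. The same fold, runnable against two synthetic source documents:

    sources = [
        {"sloc": {"years": 3,
                  "languages": {"Python": {"code": 100, "comment": 20, "blank": 10}}}},
        {"sloc": {"years": 1,
                  "languages": {"Python": {"code": 50}, "C": {"code": 200, "blank": 5}}}},
    ]
    languages, years = {}, 0
    for doc in sources:
        sloc = doc.get("sloc")
        if not sloc:
            continue
        years += sloc["years"]
        for lang, v in sloc["languages"].items():
            tot = languages.setdefault(lang, {"code": 0, "comment": 0, "blank": 0})
            for key in tot:
                tot[key] += v.get(key, 0)
    print(years)      # 4
    print(languages)  # Python: 150/20/10, C: 200/0/5
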
diff --git a/kibble/api/pages/code/top-commits.py b/kibble/api/pages/code/top-commits.py
index 3c782e9..9cda87a 100644
--- a/kibble/api/pages/code/top-commits.py
+++ b/kibble/api/pages/code/top-commits.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -62,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the TopN repos by commits list renderer for Kibble
 """
@@ -73,6 +69,7 @@ import json
 import time
 import re
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -83,83 +80,69 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
 
-    dateTo = indata.get('to', int(time.time()))
-    dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span
+    dateTo = indata.get("to", int(time.time()))
+    dateFrom = indata.get(
+        "from", dateTo - (86400 * 30 * 6)
+    )  # Default to a 6 month span
 
     ####################################################################
     ####################################################################
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'tsday': {
-                                        'from': dateFrom,
-                                        'to': dateTo
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"tsday": {"from": dateFrom, "to": dateTo}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        }
+    }
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-    if indata.get('email'):
-        query['query']['bool']['should'] = [{'term': {'committer_email': indata.get('email')}}, {'term': {'author_email': indata.get('email')}}]
-        query['query']['bool']['minimum_should_match'] = 1
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
+    if indata.get("email"):
+        query["query"]["bool"]["should"] = [
+            {"term": {"committer_email": indata.get("email")}},
+            {"term": {"author_email": indata.get("email")}},
+        ]
+        query["query"]["bool"]["minimum_should_match"] = 1
 
     # Path filter?
-    if indata.get('pathfilter'):
-        pf = indata.get('pathfilter')
-        if '!' in pf:
-            pf = pf.replace('!', '')
-            query['query']['bool']['must_not'] = query['query']['bool'].get('must_not', [])
-            query['query']['bool']['must_not'].append({'regexp': {'files_changed': pf}})
+    if indata.get("pathfilter"):
+        pf = indata.get("pathfilter")
+        if "!" in pf:
+            pf = pf.replace("!", "")
+            query["query"]["bool"]["must_not"] = query["query"]["bool"].get(
+                "must_not", []
+            )
+            query["query"]["bool"]["must_not"].append({"regexp": {"files_changed": pf}})
         else:
-            query['query']['bool']['must'].append({'regexp': {'files_changed': pf}})
-
+            query["query"]["bool"]["must"].append({"regexp": {"files_changed": pf}})
 
     # Get commit counts per repository this period
-    query['aggs'] = {
-            'by_repo': {
-                'terms': {
-                    'field': 'sourceURL',
-                    'size': 5000
-                }
-            }
-        }
+    query["aggs"] = {"by_repo": {"terms": {"field": "sourceURL", "size": 5000}}}
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="code_commit",
-            size = 0,
-            body = query
-        )
+        index=session.DB.dbname, doc_type="code_commit", size=0, body=query
+    )
 
     toprepos = []
-    for bucket in res['aggregations']['by_repo']['buckets']:
-        repo = re.sub(r".+/([^/]+?)(?:\.git)?$", r"\1", bucket['key'])
-        count = bucket['doc_count']
+    for bucket in res["aggregations"]["by_repo"]["buckets"]:
+        repo = re.sub(r".+/([^/]+?)(?:\.git)?$", r"\1", bucket["key"])
+        count = bucket["doc_count"]
 
         toprepos.append([repo, count])
 
-    toprepos = sorted(toprepos, key = lambda x: x[1], reverse = True)
+    toprepos = sorted(toprepos, key=lambda x: x[1], reverse=True)
     top = toprepos[0:24]
     if len(toprepos) > 25:
         count = 0
@@ -171,9 +154,5 @@ def run(API, environ, indata, session):
     for v in top:
         tophash[v[0]] = v[1]
 
-    JSON_OUT = {
-        'counts': tophash,
-        'okay': True,
-        'responseTime': time.time() - now,
-    }
+    JSON_OUT = {"counts": tophash, "okay": True, "responseTime": time.time() - now}
     yield json.dumps(JSON_OUT)
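
The bucket keys in `by_repo` are full `sourceURL`s; the `re.sub` above trims them to a bare repository name by keeping the last path segment and dropping an optional `.git` suffix. A standalone check of that regex (the URLs are just examples):

    import re

    def repo_name(url):
        # Keep the last path component, minus an optional ".git" suffix.
        return re.sub(r".+/([^/]+?)(?:\.git)?$", r"\1", url)

    assert repo_name("https://github.com/apache/kibble.git") == "kibble"
    assert repo_name("https://gitbox.apache.org/repos/asf/kibble") == "kibble"
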
diff --git a/kibble/api/pages/code/top-sloc.py b/kibble/api/pages/code/top-sloc.py
index 6beafaa..6950ca0 100644
--- a/kibble/api/pages/code/top-sloc.py
+++ b/kibble/api/pages/code/top-sloc.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -62,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the TopN repos by SLoC list renderer for Kibble
 """
@@ -73,6 +69,7 @@ import json
 import time
 import re
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -83,57 +80,47 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
 
     ####################################################################
     ####################################################################
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'terms':
-                                {
-                                    'type': ['git', 'svn', 'github']
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"terms": {"type": ["git", "svn", "github"]}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        }
+    }
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
 
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="source",
-            size = 5000,
-            body = query
-        )
+        index=session.DB.dbname, doc_type="source", size=5000, body=query
+    )
 
     toprepos = []
-    for doc in res['hits']['hits']:
-        repo = doc['_source']
-        url = re.sub(r".+/([^/]+?)(?:\.git)?$", r"\1", repo['sourceURL'])
-        if 'sloc' in repo:
-            count = repo['sloc'].get('loc', 0)
+    for doc in res["hits"]["hits"]:
+        repo = doc["_source"]
+        url = re.sub(r".+/([^/]+?)(?:\.git)?$", r"\1", repo["sourceURL"])
+        if "sloc" in repo:
+            count = repo["sloc"].get("loc", 0)
             if not count:
                 count = 0
             toprepos.append([url, count])
 
-    toprepos = sorted(toprepos, key = lambda x: int(x[1]), reverse = True)
+    toprepos = sorted(toprepos, key=lambda x: int(x[1]), reverse=True)
     top = toprepos[0:24]
     if len(toprepos) > 25:
         count = 0
@@ -145,9 +132,5 @@ def run(API, environ, indata, session):
     for v in top:
         tophash[v[0]] = v[1]
 
-    JSON_OUT = {
-        'counts': tophash,
-        'okay': True,
-        'responseTime': time.time() - now,
-    }
+    JSON_OUT = {"counts": tophash, "okay": True, "responseTime": time.time() - now}
     yield json.dumps(JSON_OUT)
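
Both top-N handlers sort the `[name, count]` pairs by count, descending, and keep the first 24 entries (`toprepos[0:24]`) while testing `len(toprepos) > 25`, so the cutoff and the test disagree by one; the fold-up of the remainder sits in lines elided from this diff. A self-contained sketch of that shape, using a consistent cutoff of 24 and an invented "Others" fold-up:

    # Illustrative only: the handler's exact remainder-summation is elided here.
    toprepos = [["alpha", 10], ["beta", 40], ["gamma", 5], ["delta", 25]]
    toprepos = sorted(toprepos, key=lambda x: int(x[1]), reverse=True)
    top = toprepos[0:24]
    if len(toprepos) > 24:
        top.append(["Others", sum(c for _, c in toprepos[24:])])
    tophash = {name: count for name, count in top}
    assert tophash == {"beta": 40, "delta": 25, "alpha": 10, "gamma": 5}
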
diff --git a/kibble/api/pages/code/trends.py b/kibble/api/pages/code/trends.py
index da1803c..adb7f1e 100644
--- a/kibble/api/pages/code/trends.py
+++ b/kibble/api/pages/code/trends.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -62,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the SLoC renderer for Kibble
 """
@@ -72,6 +68,7 @@ This is the SLoC renderer for Kibble
 import json
 import time
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -82,268 +79,184 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
-
-    dateTo = indata.get('to', int(time.time()))
-    dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
+
+    dateTo = indata.get("to", int(time.time()))
+    dateFrom = indata.get(
+        "from", dateTo - (86400 * 30 * 6)
+    )  # Default to a 6 month span
     if dateFrom < 0:
         dateFrom = 0
     dateYonder = dateFrom - (dateTo - dateFrom)
 
-
-
     ####################################################################
     # We start by doing all the queries for THIS period.               #
     # Then we reset the query, and change date to yonder-->from        #
     # and rerun the same queries.                                      #
     ####################################################################
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'tsday': {
-                                        'from': dateFrom,
-                                        'to': dateTo
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"tsday": {"from": dateFrom, "to": dateTo}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        }
+    }
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-    if indata.get('email'):
-        query['query']['bool']['should'] = [{'term': {'committer_email': indata.get('email')}}, {'term': {'author_email': indata.get('email')}}]
-        query['query']['bool']['minimum_should_match'] = 1
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
+    if indata.get("email"):
+        query["query"]["bool"]["should"] = [
+            {"term": {"committer_email": indata.get("email")}},
+            {"term": {"author_email": indata.get("email")}},
+        ]
+        query["query"]["bool"]["minimum_should_match"] = 1
 
     # Path filter?
-    if indata.get('pathfilter'):
-        pf = indata.get('pathfilter')
-        if '!' in pf:
-            pf = pf.replace('!', '')
-            query['query']['bool']['must_not'] = query['query']['bool'].get('must_not', [])
-            query['query']['bool']['must_not'].append({'regexp': {'files_changed': pf}})
+    if indata.get("pathfilter"):
+        pf = indata.get("pathfilter")
+        if "!" in pf:
+            pf = pf.replace("!", "")
+            query["query"]["bool"]["must_not"] = query["query"]["bool"].get(
+                "must_not", []
+            )
+            query["query"]["bool"]["must_not"].append({"regexp": {"files_changed": pf}})
         else:
-            query['query']['bool']['must'].append({'regexp': {'files_changed': pf}})
+            query["query"]["bool"]["must"].append({"regexp": {"files_changed": pf}})
 
     # Get number of commits, this period
     res = session.DB.ES.count(
-            index=session.DB.dbname,
-            doc_type="code_commit",
-            body = query
-        )
-    no_commits = res['count']
-
+        index=session.DB.dbname, doc_type="code_commit", body=query
+    )
+    no_commits = res["count"]
 
     # Get number of committers, this period
-    query['aggs'] = {
-            'commits': {
-                'cardinality': {
-                    'field': 'committer_email'
-                }
-            },
-            'authors': {
-                'cardinality': {
-                    'field': 'author_email'
-                }
-            }
-
-        }
+    query["aggs"] = {
+        "commits": {"cardinality": {"field": "committer_email"}},
+        "authors": {"cardinality": {"field": "author_email"}},
+    }
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="code_commit",
-            size = 0,
-            body = query
-        )
-    no_committers = res['aggregations']['commits']['value']
-    no_authors = res['aggregations']['authors']['value']
-
+        index=session.DB.dbname, doc_type="code_commit", size=0, body=query
+    )
+    no_committers = res["aggregations"]["commits"]["value"]
+    no_authors = res["aggregations"]["authors"]["value"]
 
     # Get number of insertions, this period
-    query['aggs'] = {
-            'changes': {
-                'sum': {
-                    'field': 'insertions'
-                }
-            }
-        }
+    query["aggs"] = {"changes": {"sum": {"field": "insertions"}}}
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="code_commit",
-            size = 0,
-            body = query
-        )
-    insertions = res['aggregations']['changes']['value']
+        index=session.DB.dbname, doc_type="code_commit", size=0, body=query
+    )
+    insertions = res["aggregations"]["changes"]["value"]
 
     # Get number of deletions, this period
-    query['aggs'] = {
-            'changes': {
-                'sum': {
-                    'field': 'deletions'
-                }
-            }
-        }
+    query["aggs"] = {"changes": {"sum": {"field": "deletions"}}}
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="code_commit",
-            size = 0,
-            body = query
-        )
-    deletions = res['aggregations']['changes']['value']
-
+        index=session.DB.dbname, doc_type="code_commit", size=0, body=query
+    )
+    deletions = res["aggregations"]["changes"]["value"]
 
     ####################################################################
     # Change to PRIOR SPAN                                             #
     ####################################################################
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'tsday': {
-                                        'from': dateYonder,
-                                        'to': dateFrom-1
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"tsday": {"from": dateYonder, "to": dateFrom - 1}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        }
+    }
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
 
     # Path filter?
-    if indata.get('pathfilter'):
-        pf = indata.get('pathfilter')
-        if '!' in pf:
-            pf = pf.replace('!', '')
-            query['query']['bool']['must_not'] = query['query']['bool'].get('must_not', [])
-            query['query']['bool']['must_not'].append({'regexp': {'files_changed': pf}})
+    if indata.get("pathfilter"):
+        pf = indata.get("pathfilter")
+        if "!" in pf:
+            pf = pf.replace("!", "")
+            query["query"]["bool"]["must_not"] = query["query"]["bool"].get(
+                "must_not", []
+            )
+            query["query"]["bool"]["must_not"].append({"regexp": {"files_changed": pf}})
         else:
-            query['query']['bool']['must'].append({'regexp': {'files_changed': pf}})
-
+            query["query"]["bool"]["must"].append({"regexp": {"files_changed": pf}})
 
     # Get number of commits, prior period
     res = session.DB.ES.count(
-            index=session.DB.dbname,
-            doc_type="code_commit",
-            body = query
-        )
-    no_commits_before = res['count']
+        index=session.DB.dbname, doc_type="code_commit", body=query
+    )
+    no_commits_before = res["count"]
 
     # Get number of committers, prior period
-    query['aggs'] = {
-            'commits': {
-                'cardinality': {
-                    'field': 'committer_email'
-                }
-            },
-            'authors': {
-                'cardinality': {
-                    'field': 'author_email'
-                }
-            }
-        }
+    query["aggs"] = {
+        "commits": {"cardinality": {"field": "committer_email"}},
+        "authors": {"cardinality": {"field": "author_email"}},
+    }
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="code_commit",
-            size = 0,
-            body = query
-        )
-    no_committers_before = res['aggregations']['commits']['value']
-    no_authors_before = res['aggregations']['authors']['value']
+        index=session.DB.dbname, doc_type="code_commit", size=0, body=query
+    )
+    no_committers_before = res["aggregations"]["commits"]["value"]
+    no_authors_before = res["aggregations"]["authors"]["value"]
 
     # Get number of insertions, prior period
-    query['aggs'] = {
-            'changes': {
-                'sum': {
-                    'field': 'insertions'
-                }
-            }
-        }
+    query["aggs"] = {"changes": {"sum": {"field": "insertions"}}}
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="code_commit",
-            size = 0,
-            body = query
-        )
-    insertions_before = res['aggregations']['changes']['value']
+        index=session.DB.dbname, doc_type="code_commit", size=0, body=query
+    )
+    insertions_before = res["aggregations"]["changes"]["value"]
 
-     # Get number of deletions, this period
-    query['aggs'] = {
-            'changes': {
-                'sum': {
-                    'field': 'deletions'
-                }
-            }
-        }
+    # Get number of deletions, prior period
+    query["aggs"] = {"changes": {"sum": {"field": "deletions"}}}
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="code_commit",
-            size = 0,
-            body = query
-        )
-    deletions_before = res['aggregations']['changes']['value']
-
-
+        index=session.DB.dbname, doc_type="code_commit", size=0, body=query
+    )
+    deletions_before = res["aggregations"]["changes"]["value"]
 
     trends = {
         "committers": {
-            'before': no_committers_before,
-            'after': no_committers,
-            'title': "Committers this period"
+            "before": no_committers_before,
+            "after": no_committers,
+            "title": "Committers this period",
         },
         "authors": {
-            'before': no_authors_before,
-            'after': no_authors,
-            'title': "Authors this period"
+            "before": no_authors_before,
+            "after": no_authors,
+            "title": "Authors this period",
         },
-        'commits': {
-            'before': no_commits_before,
-            'after': no_commits,
-            'title': "Commits this period"
+        "commits": {
+            "before": no_commits_before,
+            "after": no_commits,
+            "title": "Commits this period",
+        },
+        "changes": {
+            "before": insertions_before + deletions_before,
+            "after": insertions + deletions,
+            "title": "Lines changed this period",
         },
-        'changes': {
-            'before': insertions_before + deletions_before,
-            'after': insertions + deletions,
-            'title': "Lines changed this period"
-        }
     }
 
-    JSON_OUT = {
-        'trends': trends,
-        'okay': True,
-        'responseTime': time.time() - now
-    }
+    JSON_OUT = {"trends": trends, "okay": True, "responseTime": time.time() - now}
     yield json.dumps(JSON_OUT)
 
+
 """
 commits = {
                 before = pcommits,
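
trends.py compares the requested window against an equally long window immediately before it: `dateYonder = dateFrom - (dateTo - dateFrom)`, and the prior-period queries range over `tsday` from `dateYonder` to `dateFrom - 1`. A quick sanity check of that arithmetic (the timestamps are illustrative):

    date_to = 1_600_000_000
    date_from = date_to - 86400 * 30 * 6           # default 6 month span
    date_yonder = date_from - (date_to - date_from)

    # Both windows cover the same number of seconds and do not overlap:
    assert date_from - date_yonder == date_to - date_from
    assert date_yonder < date_from - 1 < date_from

Each metric then becomes a {"before", "after", "title"} triple, as in the trends dict above.
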
diff --git a/kibble/api/pages/filters.py b/kibble/api/pages/filters.py
index cedf96e..32b1360 100644
--- a/kibble/api/pages/filters.py
+++ b/kibble/api/pages/filters.py
@@ -23,6 +23,7 @@ import json
 import re
 import time
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -30,36 +31,26 @@ def run(API, environ, indata, session):
         raise API.exception(403, "You must be logged in to use this API endpoint!")
 
     # Fetch all sources for default org
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="view",
-            size = 5000,
-            body = {
-                'query': {
-                    'term': {
-                        'owner': session.user['email']
-                    }
-                }
-            }
-        )
+        index=session.DB.dbname,
+        doc_type="view",
+        size=5000,
+        body={"query": {"term": {"owner": session.user["email"]}}},
+    )
 
     sources = []
-    for hit in res['hits']['hits']:
-        doc = hit['_source']
-        if indata.get('quick'):
+    for hit in res["hits"]["hits"]:
+        doc = hit["_source"]
+        if indata.get("quick"):
             xdoc = {
-                'sourceID': doc['sourceID'],
-                'type': doc['type'],
-                'sourceURL': doc['sourceURL']
-                }
+                "sourceID": doc["sourceID"],
+                "type": doc["type"],
+                "sourceURL": doc["sourceURL"],
+            }
             sources.append(xdoc)
         else:
             sources.append(doc)
 
-    JSON_OUT = {
-        'views': sources,
-        'okay': True,
-        'organisation': dOrg
-    }
+    JSON_OUT = {"views": sources, "okay": True, "organisation": dOrg}
     yield json.dumps(JSON_OUT)
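
With `quick` set, filters.py returns only the three identifying fields of each stored view instead of the whole document. The projection is equivalent to this small sketch (the document contents are invented; the three field names are the handler's):

    doc = {
        "sourceID": "abc123",
        "type": "git",
        "sourceURL": "https://github.com/apache/kibble.git",
        "owner": "someone@example.org",   # dropped in quick mode
    }
    quick_fields = ("sourceID", "type", "sourceURL")
    xdoc = {k: doc[k] for k in quick_fields}
    assert xdoc == {
        "sourceID": "abc123",
        "type": "git",
        "sourceURL": "https://github.com/apache/kibble.git",
    }
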
diff --git a/kibble/api/pages/forum/actors.py b/kibble/api/pages/forum/actors.py
index ede9f6a..a9b1d18 100644
--- a/kibble/api/pages/forum/actors.py
+++ b/kibble/api/pages/forum/actors.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -62,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the forum actors stats page for Kibble
 """
@@ -73,6 +69,7 @@ import json
 import time
 import hashlib
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -83,152 +80,107 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
 
-    dateTo = indata.get('to', int(time.time()))
-    dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span
-
-    interval = indata.get('interval', 'month')
+    dateTo = indata.get("to", int(time.time()))
+    dateFrom = indata.get(
+        "from", dateTo - (86400 * 30 * 6)
+    )  # Default to a 6 month span
 
+    interval = indata.get("interval", "month")
 
     ####################################################################
     ####################################################################
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'created': {
-                                        'from': dateFrom,
-                                        'to': dateTo
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"created": {"from": dateFrom, "to": dateTo}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        }
+    }
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-    if indata.get('email'):
-        query['query']['bool']['should'] = [{'term': {'issueCreator': indata.get('email')}}]
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
+    if indata.get("email"):
+        query["query"]["bool"]["should"] = [
+            {"term": {"issueCreator": indata.get("email")}}
+        ]
 
     # Get timeseries for this period
-    query['aggs'] = {
-            'per_interval': {
-                'date_histogram': {
-                    'field': 'createdDate',
-                    'interval': interval
-                },
-                'aggs': {
-                    'by_user': {
-                        'cardinality': {
-                            'field': 'creator'
-                        }
-                    }
-                }
-            }
+    query["aggs"] = {
+        "per_interval": {
+            "date_histogram": {"field": "createdDate", "interval": interval},
+            "aggs": {"by_user": {"cardinality": {"field": "creator"}}},
         }
+    }
 
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="forum_post",
-            size = 0,
-            body = query
-        )
+        index=session.DB.dbname, doc_type="forum_post", size=0, body=query
+    )
 
     timeseries = {}
 
-    for bucket in res['aggregations']['per_interval']['buckets']:
-        ts = int(bucket['key'] / 1000)
-        ccount = bucket['by_user']['value']
-        timeseries[ts] = {
-            'date': ts,
-            'topic responders': ccount,
-            'topic creators': 0
-        }
-
+    for bucket in res["aggregations"]["per_interval"]["buckets"]:
+        ts = int(bucket["key"] / 1000)
+        ccount = bucket["by_user"]["value"]
+        timeseries[ts] = {"date": ts, "topic responders": ccount, "topic creators": 0}
 
     ####################################################################
     ####################################################################
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'created': {
-                                        'from': dateFrom,
-                                        'to': dateTo
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"created": {"from": dateFrom, "to": dateTo}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        }
+    }
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-    if indata.get('email'):
-        query['query']['bool']['should'] = [{'term': {'creator': indata.get('email')}}]
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
+    if indata.get("email"):
+        query["query"]["bool"]["should"] = [{"term": {"creator": indata.get("email")}}]
 
     # Get timeseries for this period
-    query['aggs'] = {
-            'per_interval': {
-                'date_histogram': {
-                    'field': 'createdDate',
-                    'interval': interval
-                },
-                'aggs': {
-                    'by_user': {
-                        'cardinality': {
-                            'field': 'creator'
-                        }
-                    }
-                }
-            }
+    query["aggs"] = {
+        "per_interval": {
+            "date_histogram": {"field": "createdDate", "interval": interval},
+            "aggs": {"by_user": {"cardinality": {"field": "creator"}}},
         }
+    }
 
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="forum_topic",
-            size = 0,
-            body = query
-        )
+        index=session.DB.dbname, doc_type="forum_topic", size=0, body=query
+    )
 
-    for bucket in res['aggregations']['per_interval']['buckets']:
-        ts = int(bucket['key'] / 1000)
-        ccount = bucket['by_user']['value']
+    for bucket in res["aggregations"]["per_interval"]["buckets"]:
+        ts = int(bucket["key"] / 1000)
+        ccount = bucket["by_user"]["value"]
         if ts in timeseries:
-            timeseries[ts]['topic creators'] = ccount
+            timeseries[ts]["topic creators"] = ccount
         else:
             timeseries[ts] = {
-                'date': ts,
-                'topic creators': 0,
-                'topic responders': ccount
+                "date": ts,
+                "topic creators": 0,
+                "topic responders": ccount,
             }
 
     ts = []
@@ -236,11 +188,9 @@ def run(API, environ, indata, session):
         ts.append(el)
 
     JSON_OUT = {
-        'timeseries': ts,
-        'okay': True,
-        'responseTime': time.time() - now,
-        'widgetType': {
-            'chartType': 'bar'
-        }
+        "timeseries": ts,
+        "okay": True,
+        "responseTime": time.time() - now,
+        "widgetType": {"chartType": "bar"},
     }
     yield json.dumps(JSON_OUT)
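
actors.py builds one timeseries out of two aggregations: a date histogram over forum_post gives "topic responders" per interval, and a second over forum_topic fills in "topic creators" (histogram keys arrive in milliseconds, hence the `/ 1000`). A self-contained merge with invented buckets:

    posts = [{"key": 1600000000000, "by_user": {"value": 7}}]
    topics = [{"key": 1600000000000, "by_user": {"value": 3}},
              {"key": 1602592000000, "by_user": {"value": 4}}]

    timeseries = {}
    for b in posts:  # forum_post buckets -> responders
        ts = int(b["key"] / 1000)
        timeseries[ts] = {"date": ts, "topic responders": b["by_user"]["value"],
                          "topic creators": 0}
    for b in topics:  # forum_topic buckets -> creators
        ts = int(b["key"] / 1000)
        if ts in timeseries:
            timeseries[ts]["topic creators"] = b["by_user"]["value"]
        else:
            timeseries[ts] = {"date": ts, "topic creators": b["by_user"]["value"],
                              "topic responders": 0}

    assert timeseries[1600000000] == {"date": 1600000000,
                                      "topic responders": 7, "topic creators": 3}
    assert timeseries[1602592000]["topic creators"] == 4
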
diff --git a/kibble/api/pages/forum/creators.py b/kibble/api/pages/forum/creators.py
index f5f9270..1a9e8b6 100644
--- a/kibble/api/pages/forum/creators.py
+++ b/kibble/api/pages/forum/creators.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -62,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the TopN forum topic creators list renderer for Kibble
 """
@@ -73,6 +69,7 @@ import json
 import time
 import hashlib
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -83,101 +80,78 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
 
-    dateTo = indata.get('to', int(time.time()))
-    dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span
+    dateTo = indata.get("to", int(time.time()))
+    dateFrom = indata.get(
+        "from", dateTo - (86400 * 30 * 6)
+    )  # Default to a 6 month span
 
-    interval = indata.get('interval', 'month')
+    interval = indata.get("interval", "month")
     xtitle = None
 
     ####################################################################
     ####################################################################
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'created': {
-                                        'from': dateFrom,
-                                        'to': dateTo
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"created": {"from": dateFrom, "to": dateTo}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        }
+    }
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-    if indata.get('email'):
-        query['query']['bool']['must'].append({'term': {'creator': indata.get('email')}})
-        xtitle = "People opening issues solved by %s" % indata.get('email')
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
+    if indata.get("email"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"creator": indata.get("email")}}
+        )
+        xtitle = "People opening issues solved by %s" % indata.get("email")
 
     # Get top 25 topic creators this period
-    query['aggs'] = {
-            'committers': {
-                'terms': {
-                    'field': 'creator',
-                    'size': 25
-                },
-                'aggs': {
-
-            }
-        }
+    query["aggs"] = {
+        "committers": {"terms": {"field": "creator", "size": 25}, "aggs": {}}
     }
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="forum_topic",
-            size = 0,
-            body = query
-        )
+        index=session.DB.dbname, doc_type="forum_topic", size=0, body=query
+    )
 
     people = {}
-    for bucket in res['aggregations']['committers']['buckets']:
-        email = bucket['key']
-        count = bucket['doc_count']
+    for bucket in res["aggregations"]["committers"]["buckets"]:
+        email = bucket["key"]
+        count = bucket["doc_count"]
         sha = email
-        if session.DB.ES.exists(index=session.DB.dbname,doc_type="person",id = sha):
+        if session.DB.ES.exists(index=session.DB.dbname, doc_type="person", id=sha):
             pres = session.DB.ES.get(
-                index=session.DB.dbname,
-                doc_type="person",
-                id = email
-                )
-            person = pres['_source']
-            person['name'] = person.get('name', 'unknown')
+                index=session.DB.dbname, doc_type="person", id=email
+            )
+            person = pres["_source"]
+            person["name"] = person.get("name", "unknown")
             people[email] = person
-            people[email]['gravatar'] = hashlib.md5(person.get('email', 'unknown').encode('utf-8')).hexdigest()
-            people[email]['count'] = count
+            people[email]["gravatar"] = hashlib.md5(
+                person.get("email", "unknown").encode("utf-8")
+            ).hexdigest()
+            people[email]["count"] = count
 
     topN = []
     for email, person in people.items():
         topN.append(person)
-    topN = sorted(topN, key = lambda x: x['count'], reverse = True)
+    topN = sorted(topN, key=lambda x: x["count"], reverse=True)
     JSON_OUT = {
-        'topN': {
-            'denoter': 'topics created',
-            'items': topN,
-        },
-        'okay': True,
-        'responseTime': time.time() - now,
-        'widgetType': {
-            'chartType': 'bar',
-            'title': xtitle
-        }
+        "topN": {"denoter": "topics created", "items": topN},
+        "okay": True,
+        "responseTime": time.time() - now,
+        "widgetType": {"chartType": "bar", "title": xtitle},
     }
     yield json.dumps(JSON_OUT)
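
Each person document returned above is decorated with an md5 of the e-mail address for Gravatar lookups. Note that Gravatar canonically hashes the trimmed, lower-cased address, while the handler hashes it as stored; a sketch of the canonical form:

    import hashlib

    def gravatar(email):
        # Gravatar expects the address trimmed and lower-cased before hashing.
        return hashlib.md5(email.strip().lower().encode("utf-8")).hexdigest()

    assert len(gravatar("Someone@Example.org")) == 32
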
diff --git a/kibble/api/pages/forum/issues.py b/kibble/api/pages/forum/issues.py
index b4f7fc0..8c4bbe8 100644
--- a/kibble/api/pages/forum/issues.py
+++ b/kibble/api/pages/forum/issues.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -62,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the forum timeseries renderer for Kibble
 """
@@ -78,10 +74,11 @@ import hashlib
 def makeTS(dist):
     ts = {}
     for k in dist:
-        ts[k + ' topics'] = 0
-        ts[k + ' replies'] = 0
+        ts[k + " topics"] = 0
+        ts[k + " replies"] = 0
     return ts
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -92,27 +89,26 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
 
-    dateTo = indata.get('to', int(time.time()))
-    dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span
+    dateTo = indata.get("to", int(time.time()))
+    dateFrom = indata.get(
+        "from", dateTo - (86400 * 30 * 6)
+    )  # Default to a 6 month span
 
-    interval = indata.get('interval', 'month')
+    interval = indata.get("interval", "month")
 
     # By default, we lump generic forums and question/answer (like SO, askbot) together as one
-    distinct = {
-        'forum': ['discourse', 'stackoverflow', 'askbot']
-    }
+    distinct = {"forum": ["discourse", "stackoverflow", "askbot"]}
 
     # If requested, we split them into two
-    if indata.get('distinguish', False):
+    if indata.get("distinguish", False):
         distinct = {
-            'forum':        ['discourse'],
-            'question bank': ['stackoverflow', 'askbot']
+            "forum": ["discourse"],
+            "question bank": ["stackoverflow", "askbot"],
         }
 
     timeseries = {}
@@ -123,138 +119,106 @@ def run(API, environ, indata, session):
         ####################################################################
         # TOPICS CREATED                                                   #
         ####################################################################
-        dOrg = session.user['defaultOrganisation'] or "apache"
+        dOrg = session.user["defaultOrganisation"] or "apache"
         query = {
-                    'query': {
-                        'bool': {
-                            'must': [
-                                {'range':
-                                    {
-                                        'created': {
-                                            'from': dateFrom,
-                                            'to': dateTo
-                                        }
-                                    }
-                                },
-                                {
-                                    'term': {
-                                        'organisation': dOrg
-                                    }
-                                },
-                                {
-                                    'terms': {
-                                        'type': iValues
-                                    }
-                                }
-                            ]
-                        }
-                    }
+            "query": {
+                "bool": {
+                    "must": [
+                        {"range": {"created": {"from": dateFrom, "to": dateTo}}},
+                        {"term": {"organisation": dOrg}},
+                        {"terms": {"type": iValues}},
+                    ]
                 }
+            }
+        }
         # Source-specific or view-specific??
-        if indata.get('source'):
-            query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+        if indata.get("source"):
+            query["query"]["bool"]["must"].append(
+                {"term": {"sourceID": indata.get("source")}}
+            )
         elif viewList:
-            query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-        if indata.get('email'):
-            query['query']['bool']['must'].append({'term': {'creator': indata.get('email')}})
+            query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
+        if indata.get("email"):
+            query["query"]["bool"]["must"].append(
+                {"term": {"creator": indata.get("email")}}
+            )
 
         # Get number of topics created, this period
-        query['aggs'] = {
-                'commits': {
-                    'date_histogram': {
-                        'field': 'createdDate',
-                        'interval': interval
-                    }
-                }
+        query["aggs"] = {
+            "commits": {
+                "date_histogram": {"field": "createdDate", "interval": interval}
             }
+        }
         res = session.DB.ES.search(
-                index=session.DB.dbname,
-                doc_type="forum_topic",
-                size = 0,
-                body = query
-            )
+            index=session.DB.dbname, doc_type="forum_topic", size=0, body=query
+        )
 
-        for bucket in res['aggregations']['commits']['buckets']:
-            ts = int(bucket['key'] / 1000)
-            count = bucket['doc_count']
+        for bucket in res["aggregations"]["commits"]["buckets"]:
+            ts = int(bucket["key"] / 1000)
+            count = bucket["doc_count"]
             timeseries[ts] = timeseries.get(ts, makeTS(distinct))
-            timeseries[ts][iType + ' topics'] = timeseries[ts].get(iType + ' topics', 0) + count
-
+            timeseries[ts][iType + " topics"] = (
+                timeseries[ts].get(iType + " topics", 0) + count
+            )
 
         ####################################################################
         # REPLIES POSTED                                                   #
         ####################################################################
-        dOrg = session.user['defaultOrganisation'] or "apache"
+        dOrg = session.user["defaultOrganisation"] or "apache"
         query = {
-                    'query': {
-                        'bool': {
-                            'must': [
-                                {'range':
-                                    {
-                                        'created': {
-                                            'from': dateFrom,
-                                            'to': dateTo
-                                        }
-                                    }
-                                },
-                                {
-                                    'term': {
-                                        'organisation': dOrg
-                                    }
-                                },
-                                {
-                                    'terms': {
-                                        'type': iValues
-                                    }
-                                }
-                            ]
-                        }
-                    }
+            "query": {
+                "bool": {
+                    "must": [
+                        {"range": {"created": {"from": dateFrom, "to": dateTo}}},
+                        {"term": {"organisation": dOrg}},
+                        {"terms": {"type": iValues}},
+                    ]
                 }
+            }
+        }
         if viewList:
-            query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-        if indata.get('source'):
-            query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
-        if indata.get('email'):
-            query['query']['bool']['must'].append({'term': {'creator': indata.get('email')}})
+            query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
+        if indata.get("source"):
+            query["query"]["bool"]["must"].append(
+                {"term": {"sourceID": indata.get("source")}}
+            )
+        if indata.get("email"):
+            query["query"]["bool"]["must"].append(
+                {"term": {"creator": indata.get("email")}}
+            )
 
         # Get number of replies posted, this period
-        query['aggs'] = {
-                'commits': {
-                    'date_histogram': {
-                        'field': 'createdDate',
-                        'interval': interval
-                    }
-                }
+        query["aggs"] = {
+            "commits": {
+                "date_histogram": {"field": "createdDate", "interval": interval}
             }
+        }
         res = session.DB.ES.search(
-                index=session.DB.dbname,
-                doc_type="forum_post",
-                size = 0,
-                body = query
-            )
+            index=session.DB.dbname, doc_type="forum_post", size=0, body=query
+        )
 
-        for bucket in res['aggregations']['commits']['buckets']:
-            ts = int(bucket['key'] / 1000)
-            count = bucket['doc_count']
+        for bucket in res["aggregations"]["commits"]["buckets"]:
+            ts = int(bucket["key"] / 1000)
+            count = bucket["doc_count"]
             timeseries[ts] = timeseries.get(ts, makeTS(distinct))
-            timeseries[ts][iType + ' replies'] = timeseries[ts].get(iType + ' replies', 0) + count
+            timeseries[ts][iType + " replies"] = (
+                timeseries[ts].get(iType + " replies", 0) + count
+            )
 
     ts = []
     for k, v in timeseries.items():
-        v['date'] = k
+        v["date"] = k
         ts.append(v)
 
-
     JSON_OUT = {
-        'widgetType': {
-            'chartType': 'line',  # Recommendation for the UI
-            'nofill': True
+        "widgetType": {
+            "chartType": "line",  # Recommendation for the UI
+            "nofill": True,
         },
-        'timeseries': ts,
-        'interval': interval,
-        'okay': True,
-        'distinguishable': True,
-        'responseTime': time.time() - now
+        "timeseries": ts,
+        "interval": interval,
+        "okay": True,
+        "distinguishable": True,
+        "responseTime": time.time() - now,
     }
     yield json.dumps(JSON_OUT)
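
`makeTS` seeds every interval with zeroed "<type> topics" and "<type> replies" counters for each key in `distinct`, so merged buckets always carry the full set of keys; the `distinguish` flag merely swaps the lumped category for two. Verifiable in isolation:

    def makeTS(dist):
        ts = {}
        for k in dist:
            ts[k + " topics"] = 0
            ts[k + " replies"] = 0
        return ts

    lumped = {"forum": ["discourse", "stackoverflow", "askbot"]}
    split = {"forum": ["discourse"], "question bank": ["stackoverflow", "askbot"]}

    assert makeTS(lumped) == {"forum topics": 0, "forum replies": 0}
    assert makeTS(split) == {"forum topics": 0, "forum replies": 0,
                             "question bank topics": 0, "question bank replies": 0}
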
diff --git a/kibble/api/pages/forum/responders.py b/kibble/api/pages/forum/responders.py
index 0ffa5a3..a25481d 100644
--- a/kibble/api/pages/forum/responders.py
+++ b/kibble/api/pages/forum/responders.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -62,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the TopN forum posters list renderer for Kibble
 """
@@ -73,6 +69,7 @@ import json
 import time
 import hashlib
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -83,102 +80,78 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
 
-    dateTo = indata.get('to', int(time.time()))
-    dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span
+    dateTo = indata.get("to", int(time.time()))
+    dateFrom = indata.get(
+        "from", dateTo - (86400 * 30 * 6)
+    )  # Default to a 6 month span
 
-    interval = indata.get('interval', 'month')
+    interval = indata.get("interval", "month")
     xtitle = None
 
-
     ####################################################################
     ####################################################################
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'created': {
-                                        'from': dateFrom,
-                                        'to': dateTo
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"created": {"from": dateFrom, "to": dateTo}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        }
+    }
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-    if indata.get('email'):
-        query['query']['bool']['must'].append({'term': {'creator': indata.get('email')}})
-        xTitle = "People closing %s's issues" % indata.get('email')
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
+    if indata.get("email"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"creator": indata.get("email")}}
+        )
+        xTitle = "People closing %s's issues" % indata.get("email")
 
     # Get top 25 responders this period
-    query['aggs'] = {
-            'committers': {
-                'terms': {
-                    'field': 'creator',
-                    'size': 25
-                },
-                'aggs': {
-
-            }
-        }
+    query["aggs"] = {
+        "committers": {"terms": {"field": "creator", "size": 25}, "aggs": {}}
     }
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="forum_post",
-            size = 0,
-            body = query
-        )
+        index=session.DB.dbname, doc_type="forum_post", size=0, body=query
+    )
 
     people = {}
-    for bucket in res['aggregations']['committers']['buckets']:
-        email = bucket['key']
-        count = bucket['doc_count']
+    for bucket in res["aggregations"]["committers"]["buckets"]:
+        email = bucket["key"]
+        count = bucket["doc_count"]
         sha = email
-        if session.DB.ES.exists(index=session.DB.dbname,doc_type="person",id = sha):
+        if session.DB.ES.exists(index=session.DB.dbname, doc_type="person", id=sha):
             pres = session.DB.ES.get(
-                index=session.DB.dbname,
-                doc_type="person",
-                id = email
-                )
-            person = pres['_source']
-            person['name'] = person.get('name', 'unknown')
+                index=session.DB.dbname, doc_type="person", id=email
+            )
+            person = pres["_source"]
+            person["name"] = person.get("name", "unknown")
             people[email] = person
-            people[email]['gravatar'] = hashlib.md5(person.get('email', 'unknown').encode('utf-8')).hexdigest()
-            people[email]['count'] = count
+            people[email]["gravatar"] = hashlib.md5(
+                person.get("email", "unknown").encode("utf-8")
+            ).hexdigest()
+            people[email]["count"] = count
 
     topN = []
     for email, person in people.items():
         topN.append(person)
-    topN = sorted(topN, key = lambda x: x['count'], reverse = True)
+    topN = sorted(topN, key=lambda x: x["count"], reverse=True)
     JSON_OUT = {
-        'topN': {
-            'denoter': 'replies posted',
-            'items': topN,
-        },
-        'okay': True,
-        'responseTime': time.time() - now,
-        'widgetType': {
-            'chartType': 'bar',
-            'title': xtitle
-        }
+        "topN": {"denoter": "replies posted", "items": topN},
+        "okay": True,
+        "responseTime": time.time() - now,
+        "widgetType": {"chartType": "bar", "title": xtitle},
     }
     yield json.dumps(JSON_OUT)
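
For readers skimming the reformatted handler above: its core is a single terms aggregation on "creator". A minimal, self-contained sketch of the same pattern, assuming the pre-8 elasticsearch-py client that the doc_type arguments above imply; host, index, and field values are placeholders, not Kibble configuration:

    from elasticsearch import Elasticsearch

    es = Elasticsearch(["http://localhost:9200"])  # placeholder host
    body = {
        "query": {"bool": {"must": [{"term": {"organisation": "apache"}}]}},
        "aggs": {"committers": {"terms": {"field": "creator", "size": 25}}},
    }
    res = es.search(index="kibble", size=0, body=body)  # placeholder index
    for bucket in res["aggregations"]["committers"]["buckets"]:
        print(bucket["key"], bucket["doc_count"])  # top responders by post count
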
diff --git a/kibble/api/pages/forum/top-count.py b/kibble/api/pages/forum/top-count.py
index 545567a..35947b0 100644
--- a/kibble/api/pages/forum/top-count.py
+++ b/kibble/api/pages/forum/top-count.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -62,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the TopN repos by commits list renderer for Kibble
 """
@@ -73,6 +69,7 @@ import json
 import time
 import re
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -83,76 +80,57 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
 
-    dateTo = indata.get('to', int(time.time()))
-    dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span
+    dateTo = indata.get("to", int(time.time()))
+    dateFrom = indata.get(
+        "from", dateTo - (86400 * 30 * 6)
+    )  # Default to a 6 month span
 
     ####################################################################
     ####################################################################
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'created': {
-                                        'from': dateFrom,
-                                        'to': dateTo
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"created": {"from": dateFrom, "to": dateTo}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        }
+    }
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-    if indata.get('email'):
-        query['query']['bool']['should'] = [
-            {'term': {'creator': indata.get('email')}}
-        ]
-
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
+    if indata.get("email"):
+        query["query"]["bool"]["should"] = [{"term": {"creator": indata.get("email")}}]
 
     # Get top 25 committers this period
-    query['aggs'] = {
-            'by_repo': {
-                'terms': {
-                    'field': 'sourceID',
-                    'size': 5000
-                }
-            }
-        }
+    query["aggs"] = {"by_repo": {"terms": {"field": "sourceID", "size": 5000}}}
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="forum_post",
-            size = 0,
-            body = query
-        )
+        index=session.DB.dbname, doc_type="forum_post", size=0, body=query
+    )
 
     toprepos = []
-    for bucket in res['aggregations']['by_repo']['buckets']:
-        ID = bucket['key']
-        if session.DB.ES.exists(index=session.DB.dbname, doc_type="source", id = ID):
-            it = session.DB.ES.get(index=session.DB.dbname, doc_type="source", id = ID)['_source']
-            repo = re.sub(r".+/([^/]+)$", r"\1", it['sourceURL'])
-            count = bucket['doc_count']
+    for bucket in res["aggregations"]["by_repo"]["buckets"]:
+        ID = bucket["key"]
+        if session.DB.ES.exists(index=session.DB.dbname, doc_type="source", id=ID):
+            it = session.DB.ES.get(index=session.DB.dbname, doc_type="source", id=ID)[
+                "_source"
+            ]
+            repo = re.sub(r".+/([^/]+)$", r"\1", it["sourceURL"])
+            count = bucket["doc_count"]
             toprepos.append([repo, count])
 
-    toprepos = sorted(toprepos, key = lambda x: x[1], reverse = True)
+    toprepos = sorted(toprepos, key=lambda x: x[1], reverse=True)
     top = toprepos[0:24]
     if len(toprepos) > 25:
         count = 0
@@ -164,9 +142,5 @@ def run(API, environ, indata, session):
     for v in top:
         tophash[v[0]] = v[1]
 
-    JSON_OUT = {
-        'counts': tophash,
-        'okay': True,
-        'responseTime': time.time() - now,
-    }
+    JSON_OUT = {"counts": tophash, "okay": True, "responseTime": time.time() - now}
     yield json.dumps(JSON_OUT)
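
The repository label in the handler above is cut from the source URL with a regular expression rather than stored separately. A quick illustration of what that substitution yields (the URL is a placeholder):

    import re

    url = "https://gitbox.apache.org/repos/asf/kibble.git"  # placeholder URL
    repo = re.sub(r".+/([^/]+)$", r"\1", url)  # keep everything after the last "/"
    print(repo)  # -> kibble.git
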
diff --git a/kibble/api/pages/forum/top.py b/kibble/api/pages/forum/top.py
index 483aef1..7775a17 100644
--- a/kibble/api/pages/forum/top.py
+++ b/kibble/api/pages/forum/top.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -62,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the issue actors stats page for Kibble
 """
@@ -73,6 +69,7 @@ import json
 import time
 import hashlib
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -83,79 +80,58 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
-
-    dateTo = indata.get('to', int(time.time()))
-    dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
 
-    interval = indata.get('interval', 'month')
+    dateTo = indata.get("to", int(time.time()))
+    dateFrom = indata.get(
+        "from", dateTo - (86400 * 30 * 6)
+    )  # Default to a 6 month span
 
+    interval = indata.get("interval", "month")
 
     ####################################################################
     ####################################################################
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'created': {
-                                        'from': dateFrom,
-                                        'to': dateTo
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                },
-                'sort': {
-                    'posts': 'desc'
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"created": {"from": dateFrom, "to": dateTo}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        },
+        "sort": {"posts": "desc"},
+    }
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-    if indata.get('email'):
-        query['query']['bool']['should'] = [{'term': {'creator': indata.get('email')}}]
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
+    if indata.get("email"):
+        query["query"]["bool"]["should"] = [{"term": {"creator": indata.get("email")}}]
 
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="forum_topic",
-            size = 25,
-            body = query
-        )
+        index=session.DB.dbname, doc_type="forum_topic", size=25, body=query
+    )
     top = []
-    for bucket in res['hits']['hits']:
-        doc = bucket['_source']
-        doc['source'] = doc.get('url', '#')
-        doc['name'] = doc.get('type', 'unknown')
-        doc['subject'] = doc.get('title')
-        doc['count'] = doc.get('posts', 0)
+    for bucket in res["hits"]["hits"]:
+        doc = bucket["_source"]
+        doc["source"] = doc.get("url", "#")
+        doc["name"] = doc.get("type", "unknown")
+        doc["subject"] = doc.get("title")
+        doc["count"] = doc.get("posts", 0)
         top.append(doc)
 
-
     JSON_OUT = {
-        'topN': {
-            'denoter': 'interactions',
-            'icon': 'comment',
-            'items': top
-        },
-        'okay': True,
-        'responseTime': time.time() - now,
-        'widgetType': {
-            'chartType': 'line'
-        }
+        "topN": {"denoter": "interactions", "icon": "comment", "items": top},
+        "okay": True,
+        "responseTime": time.time() - now,
+        "widgetType": {"chartType": "line"},
     }
     yield json.dumps(JSON_OUT)
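
Note that, unlike the aggregation-based endpoints, this handler sorts whole topic documents by their stored reply count. The assembled query body reduces to something like the following sketch (filter values are placeholders):

    # Sketch of the effective query: fetch the 25 busiest topics directly,
    # sorted by the "posts" field, instead of aggregating buckets.
    body = {
        "query": {"bool": {"must": [{"term": {"organisation": "apache"}}]}},
        "sort": {"posts": "desc"},
    }
    print(body)
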
diff --git a/kibble/api/pages/forum/trends.py b/kibble/api/pages/forum/trends.py
index 27efa26..f6ec610 100644
--- a/kibble/api/pages/forum/trends.py
+++ b/kibble/api/pages/forum/trends.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -62,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the forum trends renderer for Kibble
 """
@@ -72,6 +68,7 @@ This is the forum trends renderer for Kibble
 import json
 import time
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -82,20 +79,20 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
-
-    dateTo = indata.get('to', int(time.time()))
-    dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
+
+    dateTo = indata.get("to", int(time.time()))
+    dateFrom = indata.get(
+        "from", dateTo - (86400 * 30 * 6)
+    )  # Default to a 6 month span
     if dateFrom < 0:
         dateFrom = 0
     dateYonder = dateFrom - (dateTo - dateFrom)
 
-
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
 
     ####################################################################
     # We start by doing all the queries for THIS period.               #
@@ -103,250 +100,157 @@ def run(API, environ, indata, session):
     # and rerun the same queries.                                      #
     ####################################################################
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'created': {
-                                        'from': dateFrom,
-                                        'to': dateTo
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"created": {"from": dateFrom, "to": dateTo}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        }
+    }
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
 
     # Get number of issues created, this period
     res = session.DB.ES.count(
-            index=session.DB.dbname,
-            doc_type="forum_topic",
-            body = query
-        )
-    no_issues_created = res['count']
-
+        index=session.DB.dbname, doc_type="forum_topic", body=query
+    )
+    no_issues_created = res["count"]
 
     # Get number of open/close, this period
-    query['aggs'] = {
-            'opener': {
-                'cardinality': {
-                    'field': 'creator'
-                }
-            }
-        }
+    query["aggs"] = {"opener": {"cardinality": {"field": "creator"}}}
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="forum_topic",
-            size = 0,
-            body = query
-        )
-    no_creators = res['aggregations']['opener']['value']
-
+        index=session.DB.dbname, doc_type="forum_topic", size=0, body=query
+    )
+    no_creators = res["aggregations"]["opener"]["value"]
 
     # REPLIERS
 
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'created': {
-                                        'from': dateFrom,
-                                        'to': dateTo
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"created": {"from": dateFrom, "to": dateTo}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        }
+    }
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
 
     # Get number of issues created, this period
     res = session.DB.ES.count(
-            index=session.DB.dbname,
-            doc_type="forum_post",
-            body = query
-        )
-    no_issues_closed = res['count']
-
+        index=session.DB.dbname, doc_type="forum_post", body=query
+    )
+    no_issues_closed = res["count"]
 
     # Get number of open/close, this period
-    query['aggs'] = {
-            'closer': {
-                'cardinality': {
-                    'field': 'creator'
-                }
-            }
-        }
+    query["aggs"] = {"closer": {"cardinality": {"field": "creator"}}}
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="forum_post",
-            size = 0,
-            body = query
-        )
-    no_closers = res['aggregations']['closer']['value']
-
+        index=session.DB.dbname, doc_type="forum_post", size=0, body=query
+    )
+    no_closers = res["aggregations"]["closer"]["value"]
 
     ####################################################################
     # Change to PRIOR SPAN                                             #
     ####################################################################
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'created': {
-                                        'from': dateYonder,
-                                        'to': dateFrom-1
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"created": {"from": dateYonder, "to": dateFrom - 1}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        }
+    }
 
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
 
     # Get number of issues, this period
     res = session.DB.ES.count(
-            index=session.DB.dbname,
-            doc_type="forum_topic",
-            body = query
-        )
-    no_issues_created_before = res['count']
+        index=session.DB.dbname, doc_type="forum_topic", body=query
+    )
+    no_issues_created_before = res["count"]
 
     # Get number of committers, this period
-    query['aggs'] = {
-            'opener': {
-                'cardinality': {
-                    'field': 'creator'
-                }
-            }
-        }
+    query["aggs"] = {"opener": {"cardinality": {"field": "creator"}}}
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="forum_topic",
-            size = 0,
-            body = query
-        )
-    no_creators_before = res['aggregations']['opener']['value']
-
-
+        index=session.DB.dbname, doc_type="forum_topic", size=0, body=query
+    )
+    no_creators_before = res["aggregations"]["opener"]["value"]
 
     # REPLIERS
 
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'created': {
-                                        'from': dateYonder,
-                                        'to': dateFrom-1
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"created": {"from": dateYonder, "to": dateFrom - 1}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+        }
+    }
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
 
     # Get number of issues created, this period
     res = session.DB.ES.count(
-            index=session.DB.dbname,
-            doc_type="forum_post",
-            body = query
-        )
-    no_issues_closed_before = res['count']
-
+        index=session.DB.dbname, doc_type="forum_post", body=query
+    )
+    no_issues_closed_before = res["count"]
 
     # Get number of open/close, this period
-    query['aggs'] = {
-            'closer': {
-                'cardinality': {
-                    'field': "creator"
-                }
-            }
-        }
+    query["aggs"] = {"closer": {"cardinality": {"field": "creator"}}}
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="forum_post",
-            size = 0,
-            body = query
-        )
-    no_closers_before = res['aggregations']['closer']['value']
+        index=session.DB.dbname, doc_type="forum_post", size=0, body=query
+    )
+    no_closers_before = res["aggregations"]["closer"]["value"]
 
     trends = {
         "created": {
-            'before': no_issues_created_before,
-            'after': no_issues_created,
-            'title': "Topics started this period"
+            "before": no_issues_created_before,
+            "after": no_issues_created,
+            "title": "Topics started this period",
         },
         "authors": {
-            'before': no_creators_before,
-            'after': no_creators,
-            'title': "People starting topics this period"
+            "before": no_creators_before,
+            "after": no_creators,
+            "title": "People starting topics this period",
         },
         "closed": {
-            'before': no_issues_closed_before,
-            'after': no_issues_closed,
-            'title': "Replies this period"
+            "before": no_issues_closed_before,
+            "after": no_issues_closed,
+            "title": "Replies this period",
         },
         "closers": {
-            'before': no_closers_before,
-            'after': no_closers,
-            'title': "People replying this period"
-        }
+            "before": no_closers_before,
+            "after": no_closers,
+            "title": "People replying this period",
+        },
     }
 
-    JSON_OUT = {
-        'trends': trends,
-        'okay': True,
-        'responseTime': time.time() - now
-    }
+    JSON_OUT = {"trends": trends, "okay": True, "responseTime": time.time() - now}
     yield json.dumps(JSON_OUT)
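
The before/after figures above rely on dateYonder being chosen so that the prior window is exactly as long as the current one. A minimal sketch of that arithmetic, using the same defaults as the code:

    import time

    dateTo = int(time.time())
    dateFrom = dateTo - (86400 * 30 * 6)         # current window: ~6 months
    dateYonder = dateFrom - (dateTo - dateFrom)  # prior window start

    # The prior span [dateYonder, dateFrom - 1] covers as many seconds as
    # the current span, so "before" and "after" compare equal-sized periods.
    assert (dateFrom - 1) - dateYonder + 1 == dateTo - dateFrom
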
diff --git a/kibble/api/pages/issue/actors.py b/kibble/api/pages/issue/actors.py
index edcbc54..29308fd 100644
--- a/kibble/api/pages/issue/actors.py
+++ b/kibble/api/pages/issue/actors.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -62,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the issue actors stats page for Kibble
 """
@@ -73,6 +69,7 @@ import json
 import time
 import hashlib
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -83,165 +80,118 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
 
-    dateTo = indata.get('to', int(time.time()))
-    dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span
-
-    interval = indata.get('interval', 'month')
+    dateTo = indata.get("to", int(time.time()))
+    dateFrom = indata.get(
+        "from", dateTo - (86400 * 30 * 6)
+    )  # Default to a 6 month span
 
+    interval = indata.get("interval", "month")
 
     ####################################################################
     ####################################################################
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'closed': {
-                                        'from': dateFrom,
-                                        'to': dateTo
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"closed": {"from": dateFrom, "to": dateTo}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        }
+    }
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-    if indata.get('email'):
-        query['query']['bool']['should'] = [{'term': {'issueCreator': indata.get('email')}}, {'term': {'issueCloser': indata.get('email')}}]
-        query['query']['bool']['minimum_should_match'] = 1
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
+    if indata.get("email"):
+        query["query"]["bool"]["should"] = [
+            {"term": {"issueCreator": indata.get("email")}},
+            {"term": {"issueCloser": indata.get("email")}},
+        ]
+        query["query"]["bool"]["minimum_should_match"] = 1
 
     # Get timeseries for this period
-    query['aggs'] = {
-            'per_interval': {
-                'date_histogram': {
-                    'field': 'closedDate',
-                    'interval': interval
-                },
-                'aggs': {
-                    'by_user': {
-                        'cardinality': {
-                            'field': 'issueCloser'
-                        }
-                    }
-                }
-            }
+    query["aggs"] = {
+        "per_interval": {
+            "date_histogram": {"field": "closedDate", "interval": interval},
+            "aggs": {"by_user": {"cardinality": {"field": "issueCloser"}}},
         }
+    }
 
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="issue",
-            size = 0,
-            body = query
-        )
+        index=session.DB.dbname, doc_type="issue", size=0, body=query
+    )
 
     timeseries = {}
-    for bucket in res['aggregations']['per_interval']['buckets']:
-        ts = int(bucket['key'] / 1000)
-        ccount = bucket['by_user']['value']
-        timeseries[ts] = {
-            'date': ts,
-            'closers': ccount,
-            'openers': 0
-        }
-
+    for bucket in res["aggregations"]["per_interval"]["buckets"]:
+        ts = int(bucket["key"] / 1000)
+        ccount = bucket["by_user"]["value"]
+        timeseries[ts] = {"date": ts, "closers": ccount, "openers": 0}
 
     ####################################################################
     ####################################################################
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'created': {
-                                        'from': dateFrom,
-                                        'to': dateTo
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"created": {"from": dateFrom, "to": dateTo}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        }
+    }
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-    if indata.get('email'):
-        query['query']['bool']['should'] = [{'term': {'issueCreator': indata.get('email')}}, {'term': {'issueCloser': indata.get('email')}}]
-        query['query']['bool']['minimum_should_match'] = 1
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
+    if indata.get("email"):
+        query["query"]["bool"]["should"] = [
+            {"term": {"issueCreator": indata.get("email")}},
+            {"term": {"issueCloser": indata.get("email")}},
+        ]
+        query["query"]["bool"]["minimum_should_match"] = 1
 
     # Get timeseries for this period
-    query['aggs'] = {
-            'per_interval': {
-                'date_histogram': {
-                    'field': 'createdDate',
-                    'interval': interval
-                },
-                'aggs': {
-                    'by_user': {
-                        'cardinality': {
-                            'field': 'issueCreator'
-                        }
-                    }
-                }
-            }
+    query["aggs"] = {
+        "per_interval": {
+            "date_histogram": {"field": "createdDate", "interval": interval},
+            "aggs": {"by_user": {"cardinality": {"field": "issueCreator"}}},
         }
+    }
 
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="issue",
-            size = 0,
-            body = query
-        )
+        index=session.DB.dbname, doc_type="issue", size=0, body=query
+    )
 
-    for bucket in res['aggregations']['per_interval']['buckets']:
-        ts = int(bucket['key'] / 1000)
-        ccount = bucket['by_user']['value']
+    for bucket in res["aggregations"]["per_interval"]["buckets"]:
+        ts = int(bucket["key"] / 1000)
+        ccount = bucket["by_user"]["value"]
         if ts in timeseries:
-            timeseries[ts]['openers'] = ccount
+            timeseries[ts]["openers"] = ccount
         else:
-            timeseries[ts] = {
-                'date': ts,
-                'closers': 0,
-                'openers': ccount
-            }
+            timeseries[ts] = {"date": ts, "closers": 0, "openers": ccount}
 
     ts = []
     for x, el in timeseries.items():
         ts.append(el)
 
     JSON_OUT = {
-        'timeseries': ts,
-        'okay': True,
-        'responseTime': time.time() - now,
-        'widgetType': {
-            'chartType': 'bar'
-        }
+        "timeseries": ts,
+        "okay": True,
+        "responseTime": time.time() - now,
+        "widgetType": {"chartType": "bar"},
     }
     yield json.dumps(JSON_OUT)
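
The two passes above produce one timeseries each (closers, then openers) and merge them on the bucket timestamp. A toy illustration of the merge, with fabricated bucket data:

    closer_buckets = {1580515200: 3, 1583020800: 5}  # fabricated epoch: count
    opener_buckets = {1583020800: 4, 1585699200: 2}

    timeseries = {
        ts: {"date": ts, "closers": c, "openers": 0}
        for ts, c in closer_buckets.items()
    }
    for ts, c in opener_buckets.items():
        # Reuse the bucket if the closers pass already created it.
        timeseries.setdefault(ts, {"date": ts, "closers": 0, "openers": 0})
        timeseries[ts]["openers"] = c
    print(sorted(timeseries.values(), key=lambda x: x["date"]))
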
diff --git a/kibble/api/pages/issue/age.py b/kibble/api/pages/issue/age.py
index cafdaf4..d5d9869 100644
--- a/kibble/api/pages/issue/age.py
+++ b/kibble/api/pages/issue/age.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -62,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the issue actors stats page for Kibble
 """
@@ -73,6 +69,7 @@ import json
 import time
 import hashlib
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -83,78 +80,58 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
 
-    interval = indata.get('interval', 'month')
+    interval = indata.get("interval", "month")
 
     ####################################################################
     ####################################################################
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {
-                                'term': {
-                                    'status': 'open'
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [{"term": {"status": "open"}}, {"term": {"organisation": dOrg}}]
             }
+        }
+    }
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-    if indata.get('email'):
-        query['query']['bool']['should'] = [{'term': {'issueCreator': indata.get('email')}}, {'term': {'issueCloser': indata.get('email')}}]
-        query['query']['bool']['minimum_should_match'] = 1
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
+    if indata.get("email"):
+        query["query"]["bool"]["should"] = [
+            {"term": {"issueCreator": indata.get("email")}},
+            {"term": {"issueCloser": indata.get("email")}},
+        ]
+        query["query"]["bool"]["minimum_should_match"] = 1
 
     # Get timeseries for this period
-    query['aggs'] = {
-            'per_interval': {
-                'date_histogram': {
-                    'field': 'createdDate',
-                    'interval': interval
-                }
-            }
+    query["aggs"] = {
+        "per_interval": {
+            "date_histogram": {"field": "createdDate", "interval": interval}
         }
+    }
 
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="issue",
-            size = 0,
-            body = query
-        )
+        index=session.DB.dbname, doc_type="issue", size=0, body=query
+    )
     timeseries = []
     opened = 0
-    for bucket in res['aggregations']['per_interval']['buckets']:
-        ts = int(bucket['key'] / 1000)
-        opened += bucket['doc_count']
-        timeseries.append( {
-            'date': ts,
-            'open': opened
-        })
-
-
+    for bucket in res["aggregations"]["per_interval"]["buckets"]:
+        ts = int(bucket["key"] / 1000)
+        opened += bucket["doc_count"]
+        timeseries.append({"date": ts, "open": opened})
 
     JSON_OUT = {
-        'timeseries': timeseries,
-        'okay': True,
-        'responseTime': time.time() - now,
-        'widgetType': {
-            'chartType': 'line'
-        }
+        "timeseries": timeseries,
+        "okay": True,
+        "responseTime": time.time() - now,
+        "widgetType": {"chartType": "line"},
     }
     yield json.dumps(JSON_OUT)
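
The chart here is cumulative: each histogram bucket adds its count of still-open issues to a running total, so the line shows backlog growth over time. A toy example with made-up buckets:

    buckets = [("2020-01", 4), ("2020-02", 7), ("2020-03", 2)]  # fabricated
    opened = 0
    series = []
    for month, count in buckets:
        opened += count                 # running total of open issues
        series.append((month, opened))
    print(series)  # [('2020-01', 4), ('2020-02', 11), ('2020-03', 13)]
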
diff --git a/kibble/api/pages/issue/closers.py b/kibble/api/pages/issue/closers.py
index 90bd56a..94a409a 100644
--- a/kibble/api/pages/issue/closers.py
+++ b/kibble/api/pages/issue/closers.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -62,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the TopN issue closers list renderer for Kibble
 """
@@ -73,6 +69,7 @@ import json
 import time
 import hashlib
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -83,102 +80,76 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
 
-    dateTo = indata.get('to', int(time.time()))
-    dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span
+    dateTo = indata.get("to", int(time.time()))
+    dateFrom = indata.get(
+        "from", dateTo - (86400 * 30 * 6)
+    )  # Default to a 6 month span
 
-    interval = indata.get('interval', 'month')
+    interval = indata.get("interval", "month")
     xtitle = None
 
-
     ####################################################################
     ####################################################################
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'closed': {
-                                        'from': dateFrom,
-                                        'to': dateTo
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"closed": {"from": dateFrom, "to": dateTo}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        }
+    }
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-    if indata.get('email'):
-        query['query']['bool']['must'].append({'term': {'issueCreator': indata.get('email')}})
-        xTitle = "People closing %s's issues" % indata.get('email')
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
+    if indata.get("email"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"issueCreator": indata.get("email")}}
+        )
+        xtitle = "People closing %s's issues" % indata.get("email")
 
     # Get top 25 committers this period
-    query['aggs'] = {
-            'committers': {
-                'terms': {
-                    'field': 'issueCloser',
-                    'size': 25
-                },
-                'aggs': {
-
-            }
-        }
+    query["aggs"] = {
+        "committers": {"terms": {"field": "issueCloser", "size": 25}, "aggs": {}}
     }
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="issue",
-            size = 0,
-            body = query
-        )
+        index=session.DB.dbname, doc_type="issue", size=0, body=query
+    )
 
     people = {}
-    for bucket in res['aggregations']['committers']['buckets']:
-        email = bucket['key']
-        count = bucket['doc_count']
-        sha = hashlib.sha1( ("%s%s" % (dOrg, email)).encode('utf-8') ).hexdigest()
-        if session.DB.ES.exists(index=session.DB.dbname,doc_type="person",id = sha):
-            pres = session.DB.ES.get(
-                index=session.DB.dbname,
-                doc_type="person",
-                id = sha
-                )
-            person = pres['_source']
-            person['name'] = person.get('name', 'unknown')
+    for bucket in res["aggregations"]["committers"]["buckets"]:
+        email = bucket["key"]
+        count = bucket["doc_count"]
+        sha = hashlib.sha1(("%s%s" % (dOrg, email)).encode("utf-8")).hexdigest()
+        if session.DB.ES.exists(index=session.DB.dbname, doc_type="person", id=sha):
+            pres = session.DB.ES.get(index=session.DB.dbname, doc_type="person", id=sha)
+            person = pres["_source"]
+            person["name"] = person.get("name", "unknown")
             people[email] = person
-            people[email]['gravatar'] = hashlib.md5(person.get('email', 'unknown').encode('utf-8')).hexdigest()
-            people[email]['count'] = count
+            people[email]["gravatar"] = hashlib.md5(
+                person.get("email", "unknown").encode("utf-8")
+            ).hexdigest()
+            people[email]["count"] = count
 
     topN = []
     for email, person in people.items():
         topN.append(person)
-    topN = sorted(topN, key = lambda x: x['count'], reverse = True)
+    topN = sorted(topN, key=lambda x: x["count"], reverse=True)
     JSON_OUT = {
-        'topN': {
-            'denoter': 'issues closed',
-            'items': topN,
-        },
-        'okay': True,
-        'responseTime': time.time() - now,
-        'widgetType': {
-            'chartType': 'bar',
-            'title': xtitle
-        }
+        "topN": {"denoter": "issues closed", "items": topN},
+        "okay": True,
+        "responseTime": time.time() - now,
+        "widgetType": {"chartType": "bar", "title": xtitle},
     }
     yield json.dumps(JSON_OUT)
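
Two hashes appear in the handler above: the person document id, which appears to be the sha1 of organisation plus email, and the avatar, the md5 of the email as Gravatar expects. A standalone sketch (the identity is a placeholder):

    import hashlib

    org, email = "apache", "dev@example.org"  # placeholder identity
    doc_id = hashlib.sha1(("%s%s" % (org, email)).encode("utf-8")).hexdigest()
    gravatar = hashlib.md5(email.encode("utf-8")).hexdigest()
    print(doc_id, gravatar)
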
diff --git a/kibble/api/pages/issue/issues.py b/kibble/api/pages/issue/issues.py
index b947cb1..bdc6ae0 100644
--- a/kibble/api/pages/issue/issues.py
+++ b/kibble/api/pages/issue/issues.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -62,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the issue timeseries renderer for Kibble
 """
@@ -78,10 +74,11 @@ import hashlib
 def makeTS(dist):
     ts = {}
     for k in dist:
-        ts[k + ' opened'] = 0
-        ts[k + ' closed'] = 0
+        ts[k + " opened"] = 0
+        ts[k + " closed"] = 0
     return ts
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -92,28 +89,24 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
 
-    dateTo = indata.get('to', int(time.time()))
-    dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span
+    dateTo = indata.get("to", int(time.time()))
+    dateFrom = indata.get(
+        "from", dateTo - (86400 * 30 * 6)
+    )  # Default to a 6 month span
 
-    interval = indata.get('interval', 'month')
+    interval = indata.get("interval", "month")
 
     # By default, we lump PRs and issues into the same category
-    distinct = {
-        'issues': ['issue', 'pullrequest']
-    }
+    distinct = {"issues": ["issue", "pullrequest"]}
 
     # If requested, we split them into two
-    if indata.get('distinguish', False):
-        distinct = {
-            'issues':        ['issue'],
-            'pull requests': ['pullrequest']
-        }
+    if indata.get("distinguish", False):
+        distinct = {"issues": ["issue"], "pull requests": ["pullrequest"]}
 
     timeseries = {}
 
@@ -123,138 +116,104 @@ def run(API, environ, indata, session):
         ####################################################################
         # ISSUES OPENED                                                    #
         ####################################################################
-        dOrg = session.user['defaultOrganisation'] or "apache"
+        dOrg = session.user["defaultOrganisation"] or "apache"
         query = {
-                    'query': {
-                        'bool': {
-                            'must': [
-                                {'range':
-                                    {
-                                        'created': {
-                                            'from': dateFrom,
-                                            'to': dateTo
-                                        }
-                                    }
-                                },
-                                {
-                                    'term': {
-                                        'organisation': dOrg
-                                    }
-                                },
-                                {
-                                    'terms': {
-                                        'issuetype': iValues
-                                    }
-                                }
-                            ]
-                        }
-                    }
+            "query": {
+                "bool": {
+                    "must": [
+                        {"range": {"created": {"from": dateFrom, "to": dateTo}}},
+                        {"term": {"organisation": dOrg}},
+                        {"terms": {"issuetype": iValues}},
+                    ]
                 }
+            }
+        }
         # Source-specific or view-specific??
-        if indata.get('source'):
-            query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+        if indata.get("source"):
+            query["query"]["bool"]["must"].append(
+                {"term": {"sourceID": indata.get("source")}}
+            )
         elif viewList:
-            query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-        if indata.get('email'):
-            query['query']['bool']['must'].append({'term': {'issueCreator': indata.get('email')}})
+            query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
+        if indata.get("email"):
+            query["query"]["bool"]["must"].append(
+                {"term": {"issueCreator": indata.get("email")}}
+            )
 
         # Get number of opened ones, this period
-        query['aggs'] = {
-                'commits': {
-                    'date_histogram': {
-                        'field': 'createdDate',
-                        'interval': interval
-                    }
-                }
+        query["aggs"] = {
+            "commits": {
+                "date_histogram": {"field": "createdDate", "interval": interval}
             }
+        }
         res = session.DB.ES.search(
-                index=session.DB.dbname,
-                doc_type="issue",
-                size = 0,
-                body = query
-            )
+            index=session.DB.dbname, doc_type="issue", size=0, body=query
+        )
 
-        for bucket in res['aggregations']['commits']['buckets']:
-            ts = int(bucket['key'] / 1000)
-            count = bucket['doc_count']
+        for bucket in res["aggregations"]["commits"]["buckets"]:
+            ts = int(bucket["key"] / 1000)
+            count = bucket["doc_count"]
             timeseries[ts] = timeseries.get(ts, makeTS(distinct))
-            timeseries[ts][iType + ' opened'] = timeseries[ts].get(iType + ' opened', 0) + count
-
+            timeseries[ts][iType + " opened"] = (
+                timeseries[ts].get(iType + " opened", 0) + count
+            )
 
         ####################################################################
         # ISSUES CLOSED                                                    #
         ####################################################################
-        dOrg = session.user['defaultOrganisation'] or "apache"
+        dOrg = session.user["defaultOrganisation"] or "apache"
         query = {
-                    'query': {
-                        'bool': {
-                            'must': [
-                                {'range':
-                                    {
-                                        'closed': {
-                                            'from': dateFrom,
-                                            'to': dateTo
-                                        }
-                                    }
-                                },
-                                {
-                                    'term': {
-                                        'organisation': dOrg
-                                    }
-                                },
-                                {
-                                    'terms': {
-                                        'issuetype': iValues
-                                    }
-                                }
-                            ]
-                        }
-                    }
+            "query": {
+                "bool": {
+                    "must": [
+                        {"range": {"closed": {"from": dateFrom, "to": dateTo}}},
+                        {"term": {"organisation": dOrg}},
+                        {"terms": {"issuetype": iValues}},
+                    ]
                 }
+            }
+        }
         if viewList:
-            query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-        if indata.get('source'):
-            query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
-        if indata.get('email'):
-            query['query']['bool']['must'].append({'term': {'issueCloser': indata.get('email')}})
+            query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
+        if indata.get("source"):
+            query["query"]["bool"]["must"].append(
+                {"term": {"sourceID": indata.get("source")}}
+            )
+        if indata.get("email"):
+            query["query"]["bool"]["must"].append(
+                {"term": {"issueCloser": indata.get("email")}}
+            )
 
         # Get number of closed ones, this period
-        query['aggs'] = {
-                'commits': {
-                    'date_histogram': {
-                        'field': 'closedDate',
-                        'interval': interval
-                    }
-                }
-            }
+        query["aggs"] = {
+            "commits": {"date_histogram": {"field": "closedDate", "interval": interval}}
+        }
         res = session.DB.ES.search(
-                index=session.DB.dbname,
-                doc_type="issue",
-                size = 0,
-                body = query
-            )
+            index=session.DB.dbname, doc_type="issue", size=0, body=query
+        )
 
-        for bucket in res['aggregations']['commits']['buckets']:
-            ts = int(bucket['key'] / 1000)
-            count = bucket['doc_count']
+        for bucket in res["aggregations"]["commits"]["buckets"]:
+            ts = int(bucket["key"] / 1000)
+            count = bucket["doc_count"]
             timeseries[ts] = timeseries.get(ts, makeTS(distinct))
-            timeseries[ts][iType + ' closed'] = timeseries[ts].get(iType + ' closed', 0) + count
+            timeseries[ts][iType + " closed"] = (
+                timeseries[ts].get(iType + " closed", 0) + count
+            )
 
     ts = []
     for k, v in timeseries.items():
-        v['date'] = k
+        v["date"] = k
         ts.append(v)
 
-
     JSON_OUT = {
-        'widgetType': {
-            'chartType': 'line',  # Recommendation for the UI
-            'nofill': True
+        "widgetType": {
+            "chartType": "line",  # Recommendation for the UI
+            "nofill": True,
         },
-        'timeseries': ts,
-        'interval': interval,
-        'okay': True,
-        'distinguishable': True,
-        'responseTime': time.time() - now
+        "timeseries": ts,
+        "interval": interval,
+        "okay": True,
+        "distinguishable": True,
+        "responseTime": time.time() - now,
     }
     yield json.dumps(JSON_OUT)
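
The query/aggregation shape above recurs across all of these endpoints: a
bool/must filter on a date range plus organisation, a date_histogram
aggregation, and buckets folded into a timeseries dict keyed by epoch
seconds. A minimal self-contained sketch of that pattern, assuming a
6.x-era Elasticsearch client (where doc_type and interval are still
accepted) and a hypothetical local cluster and index name:

    import time
    from elasticsearch import Elasticsearch

    es = Elasticsearch()  # hypothetical local cluster

    query = {
        "query": {
            "bool": {
                "must": [
                    {"range": {"closed": {"from": 0, "to": int(time.time())}}},
                    {"term": {"organisation": "apache"}},
                ]
            }
        },
        "aggs": {
            "commits": {
                "date_histogram": {"field": "closedDate", "interval": "month"}
            }
        },
    }

    res = es.search(index="kibble", doc_type="issue", size=0, body=query)

    timeseries = {}
    for bucket in res["aggregations"]["commits"]["buckets"]:
        ts = int(bucket["key"] / 1000)  # bucket keys are epoch milliseconds
        timeseries[ts] = timeseries.get(ts, 0) + bucket["doc_count"]
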
diff --git a/kibble/api/pages/issue/openers.py b/kibble/api/pages/issue/openers.py
index 8550074..cee81ab 100644
--- a/kibble/api/pages/issue/openers.py
+++ b/kibble/api/pages/issue/openers.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -62,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the TopN issue openers list renderer for Kibble
 """
@@ -73,6 +69,7 @@ import json
 import time
 import hashlib
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -83,101 +80,76 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
 
-    dateTo = indata.get('to', int(time.time()))
-    dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span
+    dateTo = indata.get("to", int(time.time()))
+    dateFrom = indata.get(
+        "from", dateTo - (86400 * 30 * 6)
+    )  # Default to a 6 month span
 
-    interval = indata.get('interval', 'month')
+    interval = indata.get("interval", "month")
     xtitle = None
 
     ####################################################################
     ####################################################################
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'created': {
-                                        'from': dateFrom,
-                                        'to': dateTo
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"created": {"from": dateFrom, "to": dateTo}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        }
+    }
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-    if indata.get('email'):
-        query['query']['bool']['must'].append({'term': {'issueCloser': indata.get('email')}})
-        xtitle = "People opening issues solved by %s" % indata.get('email')
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
+    if indata.get("email"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"issueCloser": indata.get("email")}}
+        )
+        xtitle = "People opening issues solved by %s" % indata.get("email")
 
     # Get top 25 committers this period
-    query['aggs'] = {
-            'committers': {
-                'terms': {
-                    'field': 'issueCreator',
-                    'size': 25
-                },
-                'aggs': {
-
-            }
-        }
+    query["aggs"] = {
+        "committers": {"terms": {"field": "issueCreator", "size": 25}, "aggs": {}}
     }
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="issue",
-            size = 0,
-            body = query
-        )
+        index=session.DB.dbname, doc_type="issue", size=0, body=query
+    )
 
     people = {}
-    for bucket in res['aggregations']['committers']['buckets']:
-        email = bucket['key']
-        count = bucket['doc_count']
-        sha = hashlib.sha1( ("%s%s" % (dOrg, email)).encode('utf-8') ).hexdigest()
-        if session.DB.ES.exists(index=session.DB.dbname,doc_type="person",id = sha):
-            pres = session.DB.ES.get(
-                index=session.DB.dbname,
-                doc_type="person",
-                id = sha
-                )
-            person = pres['_source']
-            person['name'] = person.get('name', 'unknown')
+    for bucket in res["aggregations"]["committers"]["buckets"]:
+        email = bucket["key"]
+        count = bucket["doc_count"]
+        sha = hashlib.sha1(("%s%s" % (dOrg, email)).encode("utf-8")).hexdigest()
+        if session.DB.ES.exists(index=session.DB.dbname, doc_type="person", id=sha):
+            pres = session.DB.ES.get(index=session.DB.dbname, doc_type="person", id=sha)
+            person = pres["_source"]
+            person["name"] = person.get("name", "unknown")
             people[email] = person
-            people[email]['gravatar'] = hashlib.md5(person.get('email', 'unknown').encode('utf-8')).hexdigest()
-            people[email]['count'] = count
+            people[email]["gravatar"] = hashlib.md5(
+                person.get("email", "unknown").encode("utf-8")
+            ).hexdigest()
+            people[email]["count"] = count
 
     topN = []
     for email, person in people.items():
         topN.append(person)
-    topN = sorted(topN, key = lambda x: x['count'], reverse = True)
+    topN = sorted(topN, key=lambda x: x["count"], reverse=True)
     JSON_OUT = {
-        'topN': {
-            'denoter': 'issues opened',
-            'items': topN,
-        },
-        'okay': True,
-        'responseTime': time.time() - now,
-        'widgetType': {
-            'chartType': 'bar',
-            'title': xtitle
-        }
+        "topN": {"denoter": "issues opened", "items": topN},
+        "okay": True,
+        "responseTime": time.time() - now,
+        "widgetType": {"chartType": "bar", "title": xtitle},
     }
     yield json.dumps(JSON_OUT)
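
Two hash conventions do the heavy lifting in the openers list above: the
person document id is sha1(organisation + email), and the avatar id follows
the Gravatar convention of md5 over the email. A standalone sketch with
made-up inputs (note the endpoint hashes the email as-is, while Gravatar's
documented convention trims and lowercases it first):

    import hashlib

    dOrg = "apache"            # default organisation
    email = "dev@example.org"  # hypothetical contributor

    person_id = hashlib.sha1(("%s%s" % (dOrg, email)).encode("utf-8")).hexdigest()
    gravatar = hashlib.md5(email.strip().lower().encode("utf-8")).hexdigest()

    print(person_id, gravatar)
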
diff --git a/kibble/api/pages/issue/pony-timeseries.py b/kibble/api/pages/issue/pony-timeseries.py
index 22068dd..1c1b2b8 100644
--- a/kibble/api/pages/issue/pony-timeseries.py
+++ b/kibble/api/pages/issue/pony-timeseries.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -62,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the pony factor renderer for Kibble
 """
@@ -75,6 +71,7 @@ import re
 import datetime
 import dateutil.relativedelta
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -85,13 +82,12 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
 
-    hl = indata.get('span', 24)
+    hl = indata.get("span", 24)
     tnow = datetime.date.today()
     nm = tnow.month - (tnow.month % 3)
     ny = tnow.year
@@ -111,110 +107,86 @@ def run(API, environ, indata, session):
             nm += 12
             ny = ny - 1
 
-
         ####################################################################
         ####################################################################
-        dOrg = session.user['defaultOrganisation'] or "apache"
+        dOrg = session.user["defaultOrganisation"] or "apache"
         query = {
-                    'query': {
-                        'bool': {
-                            'must': [
-                                {'range':
-                                    {
-                                        'created': {
-                                            'from': tf,
-                                            'to': t
-                                        }
-                                    }
-                                },
-                                {
-                                    'term': {
-                                        'organisation': dOrg
-                                    }
-                                }
-                            ]
-                        }
-                    }
+            "query": {
+                "bool": {
+                    "must": [
+                        {"range": {"created": {"from": tf, "to": t}}},
+                        {"term": {"organisation": dOrg}},
+                    ]
                 }
+            }
+        }
         # Source-specific or view-specific??
-        if indata.get('source'):
-            query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+        if indata.get("source"):
+            query["query"]["bool"]["must"].append(
+                {"term": {"sourceID": indata.get("source")}}
+            )
         elif viewList:
-            query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
+            query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
 
         # Get an initial count of commits
-        res = session.DB.ES.count(
-                index=session.DB.dbname,
-                doc_type="issue",
-                body = query
-            )
+        res = session.DB.ES.count(index=session.DB.dbname, doc_type="issue", body=query)
 
-        globcount = res['count']
+        globcount = res["count"]
         if globcount == 0:
             break
 
         # Get top 25 committers this period
-        query['aggs'] = {
-                'by_creator': {
-                    'terms': {
-                        'field': 'issueCreator',
-                        'size': 1000
-                    }
-                },
-                'by_closer': {
-                    'terms': {
-                        'field': 'issueCloser',
-                        'size': 1000
-                    }
-                }
-            }
+        query["aggs"] = {
+            "by_creator": {"terms": {"field": "issueCreator", "size": 1000}},
+            "by_closer": {"terms": {"field": "issueCloser", "size": 1000}},
+        }
         res = session.DB.ES.search(
-                index=session.DB.dbname,
-                doc_type="issue",
-                size = 0,
-                body = query
-            )
+            index=session.DB.dbname, doc_type="issue", size=0, body=query
+        )
 
         cpf = {}
 
         # PF for openers
         pf_opener = 0
         pf_opener_count = 0
-        for bucket in res['aggregations']['by_creator']['buckets']:
-            count = bucket['doc_count']
+        for bucket in res["aggregations"]["by_creator"]["buckets"]:
+            count = bucket["doc_count"]
             pf_opener += 1
             pf_opener_count += count
-            if '@' in bucket['key']:
-                mldom = bucket['key'].lower().split('@')[-1]
+            if "@" in bucket["key"]:
+                mldom = bucket["key"].lower().split("@")[-1]
                 cpf[mldom] = True
-            if pf_opener_count > int(globcount/2):
+            if pf_opener_count > int(globcount / 2):
                 break
 
         # PF for closer
         pf_closer = 0
         pf_closer_count = 0
-        for bucket in res['aggregations']['by_closer']['buckets']:
-            count = bucket['doc_count']
+        for bucket in res["aggregations"]["by_closer"]["buckets"]:
+            count = bucket["doc_count"]
             pf_closer += 1
             pf_closer_count += count
-            if '@' in bucket['key']:
-                mldom = bucket['key'].lower().split('@')[-1]
+            if "@" in bucket["key"]:
+                mldom = bucket["key"].lower().split("@")[-1]
                 cpf[mldom] = True
-            if pf_closer_count > int(globcount/2):
+            if pf_closer_count > int(globcount / 2):
                 break
-        ts.append({
-            'date': t,
-            'Pony Factor (openers)': pf_opener,
-            'Pony Factor (closers)': pf_closer,
-            'Meta-Pony Factor': len(cpf)
-        })
+        ts.append(
+            {
+                "date": t,
+                "Pony Factor (openers)": pf_opener,
+                "Pony Factor (closers)": pf_closer,
+                "Meta-Pony Factor": len(cpf),
+            }
+        )
 
-    ts = sorted(ts, key = lambda x: x['date'])
+    ts = sorted(ts, key=lambda x: x["date"])
 
     JSON_OUT = {
-        'text': "This shows Pony Factors as calculated over a %u month timespan. Openers measures the people submitting the bulk of the issues, closers mesaures the people closing (resolving) the issues, and meta-pony is an estimation of how many organisations/companies are involved." % hl,
-        'timeseries': ts,
-        'okay': True,
-        'responseTime': time.time() - now,
+        "text": "This shows Pony Factors as calculated over a %u month timespan. Openers measures the people submitting the bulk of the issues, closers mesaures the people closing (resolving) the issues, and meta-pony is an estimation of how many organisations/companies are involved."
+        % hl,
+        "timeseries": ts,
+        "okay": True,
+        "responseTime": time.time() - now,
     }
     yield json.dumps(JSON_OUT)
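
Buried in the ES plumbing above is the actual pony-factor rule: walk the
contributors from most to least active (terms aggregation buckets already
arrive sorted by doc_count) and count how many it takes to cover more than
half of all activity. A pure-Python sketch of the same calculation on a
hypothetical tally:

    def pony_factor(counts):
        """Smallest number of people accounting for >50% of total activity."""
        total = sum(counts.values())
        covered = 0
        pf = 0
        for n in sorted(counts.values(), reverse=True):
            pf += 1
            covered += n
            if covered > total / 2:
                break
        return pf

    print(pony_factor({"alice": 60, "bob": 25, "carol": 10, "dan": 5}))  # 1
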
diff --git a/kibble/api/pages/issue/relationships.py b/kibble/api/pages/issue/relationships.py
index eb8fb76..3b4985b 100644
--- a/kibble/api/pages/issue/relationships.py
+++ b/kibble/api/pages/issue/relationships.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -62,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the issue tracker relationship list renderer for Kibble
 """
@@ -76,6 +72,7 @@ import copy
 import re
 import math
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -86,71 +83,56 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
-    dateTo = indata.get('to', int(time.time()))
-    dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
 
-    which = 'committer_email'
-    role = 'committer'
-    if indata.get('author', False):
-        which = 'author_email'
-        role = 'author'
+    dateTo = indata.get("to", int(time.time()))
+    dateFrom = indata.get(
+        "from", dateTo - (86400 * 30 * 6)
+    )  # Default to a 6 month span
 
-    interval = indata.get('interval', 'day')
+    which = "committer_email"
+    role = "committer"
+    if indata.get("author", False):
+        which = "author_email"
+        role = "author"
 
+    interval = indata.get("interval", "day")
 
     ####################################################################
     ####################################################################
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'closed': {
-                                        'from': dateFrom,
-                                        'to': dateTo
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"closed": {"from": dateFrom, "to": dateTo}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        }
+    }
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-    if indata.get('email'):
-        query['query']['bool']['should'] = [{'term': {'issueCreator': indata.get('email')}}, {'term': {'issueCloser': indata.get('email')}}]
-        query['query']['bool']['minimum_should_match'] = 1
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
+    if indata.get("email"):
+        query["query"]["bool"]["should"] = [
+            {"term": {"issueCreator": indata.get("email")}},
+            {"term": {"issueCloser": indata.get("email")}},
+        ]
+        query["query"]["bool"]["minimum_should_match"] = 1
 
     # Get number of commits, this period, per repo
-    query['aggs'] = {
-            'per_repo': {
-                'terms': {
-                    'field': 'sourceID',
-                    'size': 10000
-                }
-            }
-        }
+    query["aggs"] = {"per_repo": {"terms": {"field": "sourceID", "size": 10000}}}
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="issue",
-            size = 0,
-            body = query
-        )
+        index=session.DB.dbname, doc_type="issue", size=0, body=query
+    )
 
     repos = {}
     repo_commits = {}
@@ -161,38 +143,25 @@ def run(API, environ, indata, session):
     max_authors = 0
 
     # For each repo, count commits and gather data on authors
-    for doc in res['aggregations']['per_repo']['buckets']:
-        sourceID = doc['key']
-        commits = doc['doc_count']
+    for doc in res["aggregations"]["per_repo"]["buckets"]:
+        sourceID = doc["key"]
+        commits = doc["doc_count"]
 
         # Gather the unique authors/committers
-        query['aggs'] = {
-            'per_closer': {
-                'terms': {
-                    'field': 'issueCloser',
-                    'size': 10000
-                }
-            },
-            'per_creator': {
-                'terms': {
-                    'field': 'issueCreator',
-                    'size': 10000
-                }
-            }
+        query["aggs"] = {
+            "per_closer": {"terms": {"field": "issueCloser", "size": 10000}},
+            "per_creator": {"terms": {"field": "issueCreator", "size": 10000}},
         }
         xquery = copy.deepcopy(query)
-        xquery['query']['bool']['must'].append({'term': {'sourceID': sourceID}})
+        xquery["query"]["bool"]["must"].append({"term": {"sourceID": sourceID}})
         xres = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="issue",
-            size = 0,
-            body = xquery
+            index=session.DB.dbname, doc_type="issue", size=0, body=xquery
         )
         authors = []
-        for person in xres['aggregations']['per_closer']['buckets']:
-            authors.append(person['key'])
-        for person in xres['aggregations']['per_creator']['buckets']:
-            authors.append(person['key'])
+        for person in xres["aggregations"]["per_closer"]["buckets"]:
+            authors.append(person["key"])
+        for person in xres["aggregations"]["per_creator"]["buckets"]:
+            authors.append(person["key"])
         if commits > max_commits:
             max_commits = commits
         repos[sourceID] = authors
@@ -203,14 +172,16 @@ def run(API, environ, indata, session):
     repo_notoriety = {}
     repodatas = {}
     repo_authors = {}
-    minLinks = indata.get('links', 1)
+    minLinks = indata.get("links", 1)
 
     # Grab data of all sources
     for ID, repo in repos.items():
         mylinks = {}
-        if not session.DB.ES.exists(index=session.DB.dbname, doc_type="source", id = ID):
+        if not session.DB.ES.exists(index=session.DB.dbname, doc_type="source", id=ID):
             continue
-        repodatas[ID] = session.DB.ES.get(index=session.DB.dbname, doc_type="source", id = ID)
+        repodatas[ID] = session.DB.ES.get(
+            index=session.DB.dbname, doc_type="source", id=ID
+        )
 
     for ID, repo in repos.items():
         mylinks = {}
@@ -218,49 +189,59 @@ def run(API, environ, indata, session):
             continue
         repodata = repodatas[ID]
         oID = ID
-        if indata.get('collapse'):
-            m = re.search(indata.get('collapse'), repodata['_source']['sourceURL'])
+        if indata.get("collapse"):
+            m = re.search(indata.get("collapse"), repodata["_source"]["sourceURL"])
             if m:
                 ID = m.group(1)
         else:
-            ID = re.sub(r"^.+/", "", repodata['_source']['sourceURL'])
+            ID = re.sub(r"^.+/", "", repodata["_source"]["sourceURL"])
         for xID, xrepo in repos.items():
             if xID in repodatas:
                 xrepodata = repodatas[xID]
-                if indata.get('collapse'):
-                    m = re.search(indata.get('collapse'), xrepodata['_source']['sourceURL'])
+                if indata.get("collapse"):
+                    m = re.search(
+                        indata.get("collapse"), xrepodata["_source"]["sourceURL"]
+                    )
                     if m:
                         xID = m.group(1)
                 else:
-                    xID = re.sub(r"^.+/", "", xrepodata['_source']['sourceURL'])
+                    xID = re.sub(r"^.+/", "", xrepodata["_source"]["sourceURL"])
                 if xID != ID:
                     xlinks = []
                     for author in xrepo:
                         if author in repo:
                             xlinks.append(author)
-                    lname = "%s@%s" % (ID, xID) # Link name
-                    rname = "%s@%s" % (xID, ID) # Reverse link name
+                    lname = "%s@%s" % (ID, xID)  # Link name
+                    rname = "%s@%s" % (xID, ID)  # Reverse link name
                     if len(xlinks) >= minLinks and not rname in repo_links:
                         mylinks[xID] = len(xlinks)
-                        repo_links[lname] = repo_links.get(lname, 0) + len(xlinks) # How many contributors in common between project A and B?
+                        repo_links[lname] = repo_links.get(lname, 0) + len(
+                            xlinks
+                        )  # How many contributors in common between project A and B?
                         if repo_links[lname] > max_shared:
                             max_shared = repo_links[lname]
         if ID not in repo_notoriety:
             repo_notoriety[ID] = set()
-        repo_notoriety[ID].update(mylinks.keys()) # How many projects is this repo connected to?
+        repo_notoriety[ID].update(
+            mylinks.keys()
+        )  # How many projects is this repo connected to?
 
         if ID not in repo_authors:
             repo_authors[ID] = set()
-        repo_authors[ID].update(repo) # How many projects is this repo connected to?
+        repo_authors[ID].update(repo)  # Unique contributors to this repo
 
         if ID != oID:
             repo_commits[ID] = repo_commits.get(ID, 0) + repo_commits[oID]
             if repo_commits[ID] > max_commits:
-                max_commits = repo_commits[ID] # Used for calculating max link thickness
+                max_commits = repo_commits[
+                    ID
+                ]  # Used for calculating max link thickness
         if len(repo_notoriety[ID]) > max_links:
             max_links = len(repo_notoriety[ID])
         if len(repo_authors[ID]) > max_authors:
-            max_authors = len(repo_authors[ID]) # Used for calculating max sphere size in charts
+            max_authors = len(
+                repo_authors[ID]
+            )  # Used for calculating max sphere size in charts
 
     # Now, pull it all together!
     nodes = []
@@ -269,45 +250,44 @@ def run(API, environ, indata, session):
     for sourceID in repo_notoriety.keys():
         lsize = 0
         for k in repo_links.keys():
-            fr, to = k.split('@')
+            fr, to = k.split("@")
             if fr == sourceID or to == sourceID:
                 lsize += 1
         asize = len(repo_authors[sourceID])
         doc = {
-            'id': sourceID,
-            'name': sourceID,
-            'issues': repo_commits[sourceID],
-            'authors': asize,
-            'links': lsize,
-            'size': max(5, (1 - abs(math.log10(asize / max_authors))) * 45),
-            'tooltip': "%u connections, %u contributors, %u issues" % (lsize, asize, repo_commits[sourceID])
+            "id": sourceID,
+            "name": sourceID,
+            "issues": repo_commits[sourceID],
+            "authors": asize,
+            "links": lsize,
+            "size": max(5, (1 - abs(math.log10(asize / max_authors))) * 45),
+            "tooltip": "%u connections, %u contributors, %u issues"
+            % (lsize, asize, repo_commits[sourceID]),
         }
         nodes.append(doc)
         existing_repos.append(sourceID)
 
     for k, s in repo_links.items():
         size = s
-        fr, to = k.split('@')
+        fr, to = k.split("@")
         if fr in existing_repos and to in existing_repos:
             doc = {
-                'source': fr,
-                'target': to,
-                'value': max(1, (size/max_shared) * 8),
-                'name': "%s &#8596; %s" % (fr, to),
-                'tooltip': "%u contributors in common" % size
+                "source": fr,
+                "target": to,
+                "value": max(1, (size / max_shared) * 8),
+                "name": "%s &#8596; %s" % (fr, to),
+                "tooltip": "%u contributors in common" % size,
             }
             links.append(doc)
 
     JSON_OUT = {
-        'maxLinks': max_links,
-        'maxShared': max_shared,
-        'widgetType': {
-            'chartType': 'link'  # Recommendation for the UI
-        },
-        'links': links,
-        'nodes': nodes,
-        'interval': interval,
-        'okay': True,
-        'responseTime': time.time() - now
+        "maxLinks": max_links,
+        "maxShared": max_shared,
+        "widgetType": {"chartType": "link"},  # Recommendation for the UI
+        "links": links,
+        "nodes": nodes,
+        "interval": interval,
+        "okay": True,
+        "responseTime": time.time() - now,
     }
     yield json.dumps(JSON_OUT)
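
The relationship graph above reduces to set intersections: two sources are
linked when they share at least minLinks contributors, and the overlap size
drives link thickness. A toy version with hypothetical repos:

    repos = {  # hypothetical source -> contributors
        "kibble": {"alice", "bob", "carol"},
        "airflow": {"bob", "carol", "dan"},
        "beam": {"erin"},
    }
    min_links = 1

    links = {}
    names = sorted(repos)
    for i, a in enumerate(names):
        for b in names[i + 1:]:
            shared = repos[a] & repos[b]
            if len(shared) >= min_links:
                links["%s@%s" % (a, b)] = len(shared)

    print(links)  # {'airflow@kibble': 2}
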
diff --git a/kibble/api/pages/issue/retention.py b/kibble/api/pages/issue/retention.py
index 1cdecfd..3766e67 100644
--- a/kibble/api/pages/issue/retention.py
+++ b/kibble/api/pages/issue/retention.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -64,9 +63,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the code contributor retention factor renderer for Kibble
 """
@@ -76,6 +72,7 @@ import time
 import re
 import datetime
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -86,13 +83,14 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
-
-    hl = indata.get('span', 12) # By default, we define a contributor as active if having committer in the past year
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
+
+    hl = indata.get(
+        "span", 12
+    )  # By default, we define a contributor as active if they have contributed in the past year
     tnow = datetime.date.today()
     nm = tnow.month - (tnow.month % 3)
     ny = tnow.year
@@ -109,7 +107,7 @@ def run(API, environ, indata, session):
     FoundSomething = False
 
     ny = 1970
-    while ny < cy or (ny == cy and (nm+3) <= tnow.month):
+    while ny < cy or (ny == cy and (nm + 3) <= tnow.month):
         d = datetime.date(ny, nm, 1)
         t = time.mktime(d.timetuple())
         nm += 3
@@ -123,76 +121,49 @@ def run(API, environ, indata, session):
 
         ####################################################################
         ####################################################################
-        dOrg = session.user['defaultOrganisation'] or "apache"
+        dOrg = session.user["defaultOrganisation"] or "apache"
         query = {
-                    'query': {
-                        'bool': {
-                            'must': [
-                                {'range':
-                                    {
-                                        'closed': {
-                                            'from': t,
-                                            'to': tf
-                                        }
-                                    }
-                                },
-                                {
-                                    'term': {
-                                        'organisation': dOrg
-                                    }
-                                }
-                            ]
-                        }
-                    }
+            "query": {
+                "bool": {
+                    "must": [
+                        {"range": {"closed": {"from": t, "to": tf}}},
+                        {"term": {"organisation": dOrg}},
+                    ]
                 }
+            }
+        }
         # Source-specific or view-specific??
-        if indata.get('source'):
-            query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+        if indata.get("source"):
+            query["query"]["bool"]["must"].append(
+                {"term": {"sourceID": indata.get("source")}}
+            )
         elif viewList:
-            query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
+            query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
 
         # Get an initial count of commits
-        res = session.DB.ES.count(
-                index=session.DB.dbname,
-                doc_type="issue",
-                body = query
-            )
+        res = session.DB.ES.count(index=session.DB.dbname, doc_type="issue", body=query)
 
-        globcount = res['count']
+        globcount = res["count"]
         if globcount == 0 and FoundSomething == False:
             continue
         FoundSomething = True
 
         # Get top 1000 committers this period
-        query['aggs'] = {
-                'by_o': {
-                    'terms': {
-                        'field': 'issueCloser',
-                        'size': 50000
-                    }
-                },
-                'by_c': {
-                    'terms': {
-                        'field': 'issueCreator',
-                        'size': 50000
-                    }
-                }
-            }
+        query["aggs"] = {
+            "by_o": {"terms": {"field": "issueCloser", "size": 50000}},
+            "by_c": {"terms": {"field": "issueCreator", "size": 50000}},
+        }
         res = session.DB.ES.search(
-                index=session.DB.dbname,
-                doc_type="issue",
-                size = 0,
-                body = query
-            )
-
+            index=session.DB.dbname, doc_type="issue", size=0, body=query
+        )
 
         retained = 0
         added = 0
         lost = 0
 
         thisPeriod = []
-        for bucket in res['aggregations']['by_o']['buckets']:
-            who = bucket['key']
+        for bucket in res["aggregations"]["by_o"]["buckets"]:
+            who = bucket["key"]
             thisPeriod.append(who)
             if who not in peopleSeen:
                 peopleSeen[who] = tf
@@ -201,8 +172,8 @@ def run(API, environ, indata, session):
             if who not in allPeople:
                 allPeople[who] = tf
 
-        for bucket in res['aggregations']['by_c']['buckets']:
-            who = bucket['key']
+        for bucket in res["aggregations"]["by_c"]["buckets"]:
+            who = bucket["key"]
             thisPeriod.append(who)
             if who not in peopleSeen:
                 peopleSeen[who] = tf
@@ -214,7 +185,7 @@ def run(API, environ, indata, session):
 
         prune = []
         for k, v in activePeople.items():
-            if v < (t - (hl*30.45*86400)):
+            if v < (t - (hl * 30.45 * 86400)):
                 prune.append(k)
                 lost += 1
 
@@ -222,46 +193,49 @@ def run(API, environ, indata, session):
             del activePeople[who]
             del peopleSeen[who]
         retained = len(activePeople) - added
-        ts.append({
-            'date': tf,
-            'People who (re)joined': added,
-            'People who quit': lost,
-            'People retained': retained,
-            'Active people': added + retained
-        })
+        ts.append(
+            {
+                "date": tf,
+                "People who (re)joined": added,
+                "People who quit": lost,
+                "People retained": retained,
+                "Active people": added + retained,
+            }
+        )
 
     groups = [
-        ['More than 5 years', (5*365*86400)+1],
-        ['2 - 5 years', (2*365*86400)+1],
-        ['1 - 2 years', (365*86400)],
-        ['Less than a year', 1]
+        ["More than 5 years", (5 * 365 * 86400) + 1],
+        ["2 - 5 years", (2 * 365 * 86400) + 1],
+        ["1 - 2 years", (365 * 86400)],
+        ["Less than a year", 1],
     ]
 
     counts = {}
     totExp = 0
     for person, age in activePeople.items():
         totExp += time.time() - allPeople[person]
-        for el in sorted(groups, key = lambda x: x[1], reverse = True):
+        for el in sorted(groups, key=lambda x: x[1], reverse=True):
             if allPeople[person] <= time.time() - el[1]:
                 counts[el[0]] = counts.get(el[0], 0) + 1
                 break
-    avgyr = (totExp / (86400*365)) / max(len(activePeople),1)
+    avgyr = (totExp / (86400 * 365)) / max(len(activePeople), 1)
 
-    ts = sorted(ts, key = lambda x: x['date'])
+    ts = sorted(ts, key=lambda x: x["date"])
 
     avgm = ""
     yr = int(avgyr)
-    ym = round((avgyr-yr)*12)
+    ym = round((avgyr - yr) * 12)
     if yr >= 1:
         avgm += "%u year%s" % (yr, "s" if yr != 1 else "")
     if ym > 0:
         avgm += "%s%u month%s" % (", " if yr > 0 else "", ym, "s" if ym != 1 else "")
     JSON_OUT = {
-        'text': "This shows Contributor retention as calculated over a %u month timespan. The average experience of currently active people is %s." % (hl, avgm),
-        'timeseries': ts,
-        'counts': counts,
-        'averageYears': avgyr,
-        'okay': True,
-        'responseTime': time.time() - now,
+        "text": "This shows Contributor retention as calculated over a %u month timespan. The average experience of currently active people is %s."
+        % (hl, avgm),
+        "timeseries": ts,
+        "counts": counts,
+        "averageYears": avgyr,
+        "okay": True,
+        "responseTime": time.time() - now,
     }
     yield json.dumps(JSON_OUT)
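
The retention arithmetic above hinges on one pruning rule: anyone whose last
activity predates the span cutoff (span months at roughly 30.45 days each,
matching the endpoint's approximation) is counted as lost and dropped. The
rule in isolation, with hypothetical people:

    import time

    hl = 12  # span in months
    cutoff = time.time() - (hl * 30.45 * 86400)

    activePeople = {  # person -> last-seen epoch seconds
        "alice": time.time() - 86400 * 30,   # active a month ago
        "bob": time.time() - 86400 * 400,    # last seen over a year ago
    }

    prune = [who for who, seen in activePeople.items() if seen < cutoff]
    for who in prune:
        del activePeople[who]

    print(prune)         # ['bob']
    print(activePeople)  # alice remains
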
diff --git a/kibble/api/pages/issue/top-count.py b/kibble/api/pages/issue/top-count.py
index 4e3f5ae..38273ce 100644
--- a/kibble/api/pages/issue/top-count.py
+++ b/kibble/api/pages/issue/top-count.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -62,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the TopN repos by commits list renderer for Kibble
 """
@@ -73,6 +69,7 @@ import json
 import time
 import re
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -83,78 +80,61 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
 
-    dateTo = indata.get('to', int(time.time()))
-    dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span
+    dateTo = indata.get("to", int(time.time()))
+    dateFrom = indata.get(
+        "from", dateTo - (86400 * 30 * 6)
+    )  # Default to a 6 month span
 
     ####################################################################
     ####################################################################
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'created': {
-                                        'from': dateFrom,
-                                        'to': dateTo
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"created": {"from": dateFrom, "to": dateTo}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        }
+    }
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-    if indata.get('email'):
-        query['query']['bool']['should'] = [
-            {'term': {'issueCreator': indata.get('email')}},
-            {'term': {'issueCloser': indata.get('email')}}
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
+    if indata.get("email"):
+        query["query"]["bool"]["should"] = [
+            {"term": {"issueCreator": indata.get("email")}},
+            {"term": {"issueCloser": indata.get("email")}},
         ]
-        query['query']['bool']['minimum_should_match'] = 1
-
+        query["query"]["bool"]["minimum_should_match"] = 1
 
     # Get top 25 committers this period
-    query['aggs'] = {
-            'by_repo': {
-                'terms': {
-                    'field': 'sourceID',
-                    'size': 5000
-                }
-            }
-        }
+    query["aggs"] = {"by_repo": {"terms": {"field": "sourceID", "size": 5000}}}
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="issue",
-            size = 0,
-            body = query
-        )
+        index=session.DB.dbname, doc_type="issue", size=0, body=query
+    )
 
     toprepos = []
-    for bucket in res['aggregations']['by_repo']['buckets']:
-        ID = bucket['key']
-        if session.DB.ES.exists(index=session.DB.dbname, doc_type="source", id = ID):
-            it = session.DB.ES.get(index=session.DB.dbname, doc_type="source", id = ID)['_source']
-            repo = re.sub(r".+/([^/]+)$", r"\1", it['sourceURL'])
-            count = bucket['doc_count']
+    for bucket in res["aggregations"]["by_repo"]["buckets"]:
+        ID = bucket["key"]
+        if session.DB.ES.exists(index=session.DB.dbname, doc_type="source", id=ID):
+            it = session.DB.ES.get(index=session.DB.dbname, doc_type="source", id=ID)[
+                "_source"
+            ]
+            repo = re.sub(r".+/([^/]+)$", r"\1", it["sourceURL"])
+            count = bucket["doc_count"]
             toprepos.append([repo, count])
 
-    toprepos = sorted(toprepos, key = lambda x: x[1], reverse = True)
+    toprepos = sorted(toprepos, key=lambda x: x[1], reverse=True)
     top = toprepos[0:24]
     if len(toprepos) > 25:
         count = 0
@@ -166,9 +146,5 @@ def run(API, environ, indata, session):
     for v in top:
         tophash[v[0]] = v[1]
 
-    JSON_OUT = {
-        'counts': tophash,
-        'okay': True,
-        'responseTime': time.time() - now,
-    }
+    JSON_OUT = {"counts": tophash, "okay": True, "responseTime": time.time() - now}
     yield json.dumps(JSON_OUT)
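
The tail handling in this file is cut off by the hunk above, so take this
only as a plausible reading: after sorting, the endpoint keeps the top 24
repos, and a common follow-up for the 25-plus case is folding the remainder
into a single bucket. A sketch under that assumption, with synthetic counts:

    toprepos = sorted(
        [("repo%u" % i, i) for i in range(1, 31)],  # hypothetical (repo, count)
        key=lambda x: x[1],
        reverse=True,
    )

    top = toprepos[0:24]
    if len(toprepos) > 25:
        others = sum(count for _, count in toprepos[24:])
        top.append(("Others", others))  # assumption: the elided lines may differ

    tophash = {repo: count for repo, count in top}
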
diff --git a/kibble/api/pages/issue/top.py b/kibble/api/pages/issue/top.py
index 87a05f1..70fae4d 100644
--- a/kibble/api/pages/issue/top.py
+++ b/kibble/api/pages/issue/top.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -62,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the issue actors stats page for Kibble
 """
@@ -73,6 +69,7 @@ import json
 import time
 import hashlib
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -83,80 +80,62 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
-
-    dateTo = indata.get('to', int(time.time()))
-    dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
 
-    interval = indata.get('interval', 'month')
+    dateTo = indata.get("to", int(time.time()))
+    dateFrom = indata.get(
+        "from", dateTo - (86400 * 30 * 6)
+    )  # Default to a 6 month span
 
+    interval = indata.get("interval", "month")
 
     ####################################################################
     ####################################################################
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'created': {
-                                        'from': dateFrom,
-                                        'to': dateTo
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                },
-                'sort': {
-                    'comments': 'desc'
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"created": {"from": dateFrom, "to": dateTo}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        },
+        "sort": {"comments": "desc"},
+    }
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-    if indata.get('email'):
-        query['query']['bool']['should'] = [{'term': {'issueCreator': indata.get('email')}}, {'term': {'issueCloser': indata.get('email')}}]
-        query['query']['bool']['minimum_should_match'] = 1
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
+    if indata.get("email"):
+        query["query"]["bool"]["should"] = [
+            {"term": {"issueCreator": indata.get("email")}},
+            {"term": {"issueCloser": indata.get("email")}},
+        ]
+        query["query"]["bool"]["minimum_should_match"] = 1
 
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="issue",
-            size = 25,
-            body = query
-        )
+        index=session.DB.dbname, doc_type="issue", size=25, body=query
+    )
     top = []
-    for bucket in res['hits']['hits']:
-        doc = bucket['_source']
-        doc['source'] = doc.get('url', '#')
-        doc['name'] = doc.get('key', 'unknown')
-        doc['subject'] = doc.get('title')
-        doc['count'] = doc.get('comments', 0)
+    for bucket in res["hits"]["hits"]:
+        doc = bucket["_source"]
+        doc["source"] = doc.get("url", "#")
+        doc["name"] = doc.get("key", "unknown")
+        doc["subject"] = doc.get("title")
+        doc["count"] = doc.get("comments", 0)
         top.append(doc)
 
-
     JSON_OUT = {
-        'topN': {
-            'denoter': 'interactions',
-            'icon': 'bug',
-            'items': top
-        },
-        'okay': True,
-        'responseTime': time.time() - now,
-        'widgetType': {
-            'chartType': 'line'
-        }
+        "topN": {"denoter": "interactions", "icon": "bug", "items": top},
+        "okay": True,
+        "responseTime": time.time() - now,
+        "widgetType": {"chartType": "line"},
     }
     yield json.dumps(JSON_OUT)
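
Unlike the aggregation-driven endpoints, this one needs whole documents, so
it sorts by the comments field and asks for 25 hits directly. The same
retrieval in isolation, again assuming a 6.x-era client and a hypothetical
index:

    from elasticsearch import Elasticsearch

    es = Elasticsearch()  # hypothetical cluster

    query = {
        "query": {"bool": {"must": [{"term": {"organisation": "apache"}}]}},
        "sort": {"comments": "desc"},  # most-discussed first
    }

    res = es.search(index="kibble", doc_type="issue", size=25, body=query)
    for hit in res["hits"]["hits"]:
        doc = hit["_source"]
        print(doc.get("key", "unknown"), doc.get("comments", 0))
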
diff --git a/kibble/api/pages/issue/trends.py b/kibble/api/pages/issue/trends.py
index 61c528b..f43971e 100644
--- a/kibble/api/pages/issue/trends.py
+++ b/kibble/api/pages/issue/trends.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -62,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the Issue trends renderer for Kibble
 """
@@ -72,6 +68,7 @@ This is the Issue trends renderer for Kibble
 import json
 import time
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -82,20 +79,20 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
-
-    dateTo = indata.get('to', int(time.time()))
-    dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
+
+    dateTo = indata.get("to", int(time.time()))
+    dateFrom = indata.get(
+        "from", dateTo - (86400 * 30 * 6)
+    )  # Default to a 6 month span
     if dateFrom < 0:
         dateFrom = 0
     dateYonder = dateFrom - (dateTo - dateFrom)
 
-
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
 
     ####################################################################
     # We start by doing all the queries for THIS period.               #
@@ -103,257 +100,164 @@ def run(API, environ, indata, session):
     # and rerun the same queries.                                      #
     ####################################################################
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'created': {
-                                        'from': dateFrom,
-                                        'to': dateTo
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"created": {"from": dateFrom, "to": dateTo}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        }
+    }
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-    if indata.get('email'):
-        query['query']['bool']['should'] = [{'term': {'issueCreator': indata.get('email')}}, {'term': {'issueCloser': indata.get('email')}}]
-        query['query']['bool']['minimum_should_match'] = 1
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
+    if indata.get("email"):
+        query["query"]["bool"]["should"] = [
+            {"term": {"issueCreator": indata.get("email")}},
+            {"term": {"issueCloser": indata.get("email")}},
+        ]
+        query["query"]["bool"]["minimum_should_match"] = 1
 
     # Get number of issues created, this period
-    res = session.DB.ES.count(
-            index=session.DB.dbname,
-            doc_type="issue",
-            body = query
-        )
-    no_issues_created = res['count']
-
+    res = session.DB.ES.count(index=session.DB.dbname, doc_type="issue", body=query)
+    no_issues_created = res["count"]
 
     # Get number of open/close, this period
-    query['aggs'] = {
-            'opener': {
-                'cardinality': {
-                    'field': 'issueCreator'
-                }
-            }
-        }
+    query["aggs"] = {"opener": {"cardinality": {"field": "issueCreator"}}}
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="issue",
-            size = 0,
-            body = query
-        )
-    no_creators = res['aggregations']['opener']['value']
-
+        index=session.DB.dbname, doc_type="issue", size=0, body=query
+    )
+    no_creators = res["aggregations"]["opener"]["value"]
 
     # CLOSERS
 
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'closed': {
-                                        'from': dateFrom,
-                                        'to': dateTo
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"closed": {"from": dateFrom, "to": dateTo}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        }
+    }
     # Source-specific or view-specific??
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-    if indata.get('email'):
-        query['query']['bool']['should'] = [{'term': {'issueCreator': indata.get('email')}}, {'term': {'issueCloser': indata.get('email')}}]
-        query['query']['bool']['minimum_should_match'] = 1
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
+    if indata.get("email"):
+        query["query"]["bool"]["should"] = [
+            {"term": {"issueCreator": indata.get("email")}},
+            {"term": {"issueCloser": indata.get("email")}},
+        ]
+        query["query"]["bool"]["minimum_should_match"] = 1
 
     # Get number of issues created, this period
-    res = session.DB.ES.count(
-            index=session.DB.dbname,
-            doc_type="issue",
-            body = query
-        )
-    no_issues_closed = res['count']
-
+    res = session.DB.ES.count(index=session.DB.dbname, doc_type="issue", body=query)
+    no_issues_closed = res["count"]
 
     # Get number of unique issue closers, this period
-    query['aggs'] = {
-            'closer': {
-                'cardinality': {
-                    'field': 'issueCloser'
-                }
-            }
-        }
+    query["aggs"] = {"closer": {"cardinality": {"field": "issueCloser"}}}
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="issue",
-            size = 0,
-            body = query
-        )
-    no_closers = res['aggregations']['closer']['value']
-
-
+        index=session.DB.dbname, doc_type="issue", size=0, body=query
+    )
+    no_closers = res["aggregations"]["closer"]["value"]
 
     ####################################################################
     # Change to PRIOR SPAN                                             #
     ####################################################################
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'created': {
-                                        'from': dateYonder,
-                                        'to': dateFrom-1
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"created": {"from": dateYonder, "to": dateFrom - 1}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        }
+    }
     if viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-    if indata.get('email'):
-        query['query']['bool']['should'] = [{'term': {'issueCreator': indata.get('email')}}, {'term': {'issueCloser': indata.get('email')}}]
-        query['query']['bool']['minimum_should_match'] = 1
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
+    if indata.get("email"):
+        query["query"]["bool"]["should"] = [
+            {"term": {"issueCreator": indata.get("email")}},
+            {"term": {"issueCloser": indata.get("email")}},
+        ]
+        query["query"]["bool"]["minimum_should_match"] = 1
 
     # Get number of issues created, prior period
-    res = session.DB.ES.count(
-            index=session.DB.dbname,
-            doc_type="issue",
-            body = query
-        )
-    no_issues_created_before = res['count']
+    res = session.DB.ES.count(index=session.DB.dbname, doc_type="issue", body=query)
+    no_issues_created_before = res["count"]
 
     # Get number of unique issue openers, prior period
-    query['aggs'] = {
-            'opener': {
-                'cardinality': {
-                    'field': 'issueCreator'
-                }
-            }
-        }
+    query["aggs"] = {"opener": {"cardinality": {"field": "issueCreator"}}}
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="issue",
-            size = 0,
-            body = query
-        )
-    no_creators_before = res['aggregations']['opener']['value']
-
-
+        index=session.DB.dbname, doc_type="issue", size=0, body=query
+    )
+    no_creators_before = res["aggregations"]["opener"]["value"]
 
     # CLOSERS
 
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'closed': {
-                                        'from': dateYonder,
-                                        'to': dateFrom-1
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"closed": {"from": dateYonder, "to": dateFrom - 1}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        }
+    }
     if viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-    if indata.get('email'):
-        query['query']['bool']['should'] = [{'term': {'issueCreator': indata.get('email')}}, {'term': {'issueCloser': indata.get('email')}}]
-        query['query']['bool']['minimum_should_match'] = 1
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
+    if indata.get("email"):
+        query["query"]["bool"]["should"] = [
+            {"term": {"issueCreator": indata.get("email")}},
+            {"term": {"issueCloser": indata.get("email")}},
+        ]
+        query["query"]["bool"]["minimum_should_match"] = 1
 
     # Get number of issues closed, prior period
-    res = session.DB.ES.count(
-            index=session.DB.dbname,
-            doc_type="issue",
-            body = query
-        )
-    no_issues_closed_before = res['count']
-
+    res = session.DB.ES.count(index=session.DB.dbname, doc_type="issue", body=query)
+    no_issues_closed_before = res["count"]
 
     # Get number of unique issue closers, prior period
-    query['aggs'] = {
-            'closer': {
-                'cardinality': {
-                    'field': 'issueCloser'
-                }
-            }
-        }
+    query["aggs"] = {"closer": {"cardinality": {"field": "issueCloser"}}}
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="issue",
-            size = 0,
-            body = query
-        )
-    no_closers_before = res['aggregations']['closer']['value']
-
+        index=session.DB.dbname, doc_type="issue", size=0, body=query
+    )
+    no_closers_before = res["aggregations"]["closer"]["value"]
 
     trends = {
         "created": {
-            'before': no_issues_created_before,
-            'after': no_issues_created,
-            'title': "Issues opened this period"
+            "before": no_issues_created_before,
+            "after": no_issues_created,
+            "title": "Issues opened this period",
         },
         "authors": {
-            'before': no_creators_before,
-            'after': no_creators,
-            'title': "People opening issues this period"
+            "before": no_creators_before,
+            "after": no_creators,
+            "title": "People opening issues this period",
         },
         "closed": {
-            'before': no_issues_closed_before,
-            'after': no_issues_closed,
-            'title': "Issues closed this period"
+            "before": no_issues_closed_before,
+            "after": no_issues_closed,
+            "title": "Issues closed this period",
         },
         "closers": {
-            'before': no_closers_before,
-            'after': no_closers,
-            'title': "People closing issues this period"
-        }
+            "before": no_closers_before,
+            "after": no_closers,
+            "title": "People closing issues this period",
+        },
     }
 
-    JSON_OUT = {
-        'trends': trends,
-        'okay': True,
-        'responseTime': time.time() - now
-    }
+    JSON_OUT = {"trends": trends, "okay": True, "responseTime": time.time() - now}
     yield json.dumps(JSON_OUT)
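
The trends handler above runs the same pattern four times (created/closed,
current/prior span): a bool query over a date range, a count() call for the
total, then a cardinality aggregation for the number of unique people. A
minimal sketch of that pattern, assuming a pre-7.x elasticsearch-py client
in the role of session.DB.ES; the index name "kibble" and the helper name
are illustrative, not part of the codebase:

    import time

    from elasticsearch import Elasticsearch  # assumed client, pre-7.x API

    def count_and_uniques(es, index, date_field, person_field, date_from, date_to):
        """Return (total issues, unique people) for one time span."""
        query = {
            "query": {
                "bool": {
                    "must": [
                        {"range": {date_field: {"from": date_from, "to": date_to}}},
                    ]
                }
            }
        }
        total = es.count(index=index, doc_type="issue", body=query)["count"]
        # size=0: we only want the aggregation, not the matching documents
        query["aggs"] = {"uniq": {"cardinality": {"field": person_field}}}
        res = es.search(index=index, doc_type="issue", size=0, body=query)
        return total, res["aggregations"]["uniq"]["value"]

    es = Elasticsearch()  # hypothetical local node
    now = int(time.time())
    span = 86400 * 30 * 6  # the endpoint's default six-month window
    opened, openers = count_and_uniques(
        es, "kibble", "created", "issueCreator", now - span, now
    )
    opened_prior, openers_prior = count_and_uniques(
        es, "kibble", "created", "issueCreator", now - 2 * span, now - span - 1
    )

Note that cardinality is a HyperLogLog-based estimate, so the unique-people
figures are approximate on very large result sets; for before/after trend
arrows that is an acceptable trade-off.
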
diff --git a/kibble/api/pages/mail/keyphrases.py b/kibble/api/pages/mail/keyphrases.py
index ed03282..ead6d4b 100644
--- a/kibble/api/pages/mail/keyphrases.py
+++ b/kibble/api/pages/mail/keyphrases.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -62,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the common key phrases renderer for Kibble
 """
@@ -73,6 +69,7 @@ import json
 import time
 import hashlib
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -83,76 +80,52 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
 
-    dateTo = indata.get('to', int(time.time()))
-    dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span
-
-    interval = indata.get('interval', 'month')
+    dateTo = indata.get("to", int(time.time()))
+    dateFrom = indata.get(
+        "from", dateTo - (86400 * 30 * 6)
+    )  # Default to a 6 month span
 
+    interval = indata.get("interval", "month")
 
     ####################################################################
     ####################################################################
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'ts': {
-                                        'from': dateFrom,
-                                        'to': dateTo
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                },
-                'aggs': {
-                    'kpe': {
-                        'terms': {
-                            'field': 'kpe.keyword',
-                            'size': 50
-                        }
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"ts": {"from": dateFrom, "to": dateTo}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        },
+        "aggs": {"kpe": {"terms": {"field": "kpe.keyword", "size": 50}}},
+    }
     # Restrict to a specific source if given, otherwise to the chosen view
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
 
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="email",
-            size = 0,
-            body = query
-        )
+        index=session.DB.dbname, doc_type="email", size=0, body=query
+    )
 
     topN = []
-    for bucket in res['aggregations']['kpe']['buckets']:
-        topN.append( {
-            'phrase': bucket['key'],
-            'count': bucket['doc_count']
-        })
+    for bucket in res["aggregations"]["kpe"]["buckets"]:
+        topN.append({"phrase": bucket["key"], "count": bucket["doc_count"]})
 
     JSON_OUT = {
-        'widgetType': {
-            'chartType': 'bar'
-        },
-        'phrases': topN,
-        'okay': True,
-        'responseTime': time.time() - now
+        "widgetType": {"chartType": "bar"},
+        "phrases": topN,
+        "okay": True,
+        "responseTime": time.time() - now,
     }
     yield json.dumps(JSON_OUT)
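
The key phrase endpoint is essentially a single terms aggregation on the
unanalysed kpe.keyword field, capped at the top 50 buckets. A minimal
sketch, under the same client assumption as above (index name and time
bounds are illustrative):

    from elasticsearch import Elasticsearch  # assumed client, pre-7.x API

    es = Elasticsearch()  # hypothetical local node
    query = {
        "query": {"bool": {"must": [{"range": {"ts": {"from": 0, "to": 2000000000}}}]}},
        # Top 50 key phrases by document count
        "aggs": {"kpe": {"terms": {"field": "kpe.keyword", "size": 50}}},
    }
    res = es.search(index="kibble", doc_type="email", size=0, body=query)
    top_phrases = [
        {"phrase": b["key"], "count": b["doc_count"]}
        for b in res["aggregations"]["kpe"]["buckets"]
    ]

Buckets come back sorted by doc_count descending, so top_phrases is already
in the order the bar chart wants.
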
diff --git a/kibble/api/pages/mail/map.py b/kibble/api/pages/mail/map.py
index 14f7170..ba188ea 100644
--- a/kibble/api/pages/mail/map.py
+++ b/kibble/api/pages/mail/map.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -62,9 +61,6 @@
 ########################################################################
 
 
-
-
-
 """
 This is the mail interaction map renderer for Kibble
 """
@@ -78,6 +74,7 @@ import math
 
 badBots = r"(JIRA|Hudson|jira|jenkins|GitHub|git@|dev@|bugzilla|gerrit)"
 
+
 def run(API, environ, indata, session):
 
     # We need to be logged in for this!
@@ -88,69 +85,61 @@ def run(API, environ, indata, session):
 
     # First, fetch the view if we have such a thing enabled
     viewList = []
-    if indata.get('view'):
-        viewList = session.getView(indata.get('view'))
-    if indata.get('subfilter'):
-        viewList = session.subFilter(indata.get('subfilter'), view = viewList)
-
-    dateTo = indata.get('to', int(time.time()))
-    dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span
+    if indata.get("view"):
+        viewList = session.getView(indata.get("view"))
+    if indata.get("subfilter"):
+        viewList = session.subFilter(indata.get("subfilter"), view=viewList)
+
+    dateTo = indata.get("to", int(time.time()))
+    dateFrom = indata.get(
+        "from", dateTo - (86400 * 30 * 6)
+    )  # Default to a 6 month span
     span = dateTo - dateFrom
 
     ####################################################################
     ####################################################################
-    dOrg = session.user['defaultOrganisation'] or "apache"
+    dOrg = session.user["defaultOrganisation"] or "apache"
     query = {
-                'query': {
-                    'bool': {
-                        'must': [
-                            {'range':
-                                {
-                                    'ts': {
-                                        'from': dateFrom,
-                                        'to': dateTo
-                                    }
-                                }
-                            },
-                            {
-                                'term': {
-                                    'organisation': dOrg
-                                }
-                            }
-                        ]
-                    }
-                }
+        "query": {
+            "bool": {
+                "must": [
+                    {"range": {"ts": {"from": dateFrom, "to": dateTo}}},
+                    {"term": {"organisation": dOrg}},
+                ]
             }
+        }
+    }
     # Restrict to a specific source if given, otherwise to the chosen view
-    if indata.get('source'):
-        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    if indata.get("source"):
+        query["query"]["bool"]["must"].append(
+            {"term": {"sourceID": indata.get("source")}}
+        )
     elif viewList:
-        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
-    if indata.get('search'):
-        query['query']['bool']['must'].append({'regexp': {'subject': indata.get('search')}})
+        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
+    if indata.get("search"):
+        query["query"]["bool"]["must"].append(
+            {"regexp": {"subject": indata.get("search")}}
+        )
 
-    if indata.get('email'):
-        query['query']['bool']['minimum_should_match'] = 1
-        query['query']['bool']['should'] = [
-            {'term': {'replyto.keyword': indata.get('email')}},
-            {'term': {'sender': indata.get('email')}},
-            ]
+    if indata.get("email"):
+        query["query"]["bool"]["minimum_should_match"] = 1
+        query["query"]["bool"]["should"] = [
+            {"term": {"replyto.keyword": indata.get("email")}},
+            {"term": {"sender": indata.get("email")}},
+        ]
 
     # Get number of emails, this period, per address
-    query['aggs'] = {
-            'per_ml': {
-                'terms': {
-                    'field': 'replyto.keyword' if not indata.get('author') else 'sender',
-                    'size': 150
-                }
+    query["aggs"] = {
+        "per_ml": {
+            "terms": {
+                "field": "replyto.keyword" if not indata.get("author") else "sender",
+                "size": 150,
             }
         }
+    }
     res = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="email",
-            size = 0,
-            body = query
-        )
+        index=session.DB.dbname, doc_type="email", size=0, body=query
+    )
 
     repos = {}
     repo_commits = {}
@@ -159,43 +148,49 @@ def run(API, environ, indata, session):
     max_links = 0
     max_shared = 0
     max_authors = 0
-    minLinks = indata.get('links', 1)
+    minLinks = indata.get("links", 1)
 
-    if indata.get('email'):
-            del query['query']['bool']['should']
-            del query['query']['bool']['minimum_should_match']
+    if indata.get("email"):
+        del query["query"]["bool"]["should"]
+        del query["query"]["bool"]["minimum_should_match"]
 
     # For each address, count emails and gather its unique correspondents
-    for doc in res['aggregations']['per_ml']['buckets']:
-        sourceID = doc['key']
-        emails = doc['doc_count']
-        if re.search(badBots, sourceID): # No bots
+    for doc in res["aggregations"]["per_ml"]["buckets"]:
+        sourceID = doc["key"]
+        emails = doc["doc_count"]
+        if re.search(badBots, sourceID):  # No bots
             continue
-        if emails > (span/86400)*4: # More than 4/day and we consider you a bot!
+        if emails > (span / 86400) * 4:  # More than 4/day and we consider you a bot!
             continue
 
-
         # Gather the unique senders (or recipients, in author mode)
-        query['aggs'] = {
-            'per_ml': {
-                'terms': {
-                    'field': 'sender' if not indata.get('author') else 'replyto.keyword',
-                    'size': 5000
+        query["aggs"] = {
+            "per_ml": {
+                "terms": {
+                    "field": "sender"
+                    if not indata.get("author")
+                    else "replyto.keyword",
+                    "size": 5000,
                 }
             }
         }
         xquery = copy.deepcopy(query)
 
-        xquery['query']['bool']['must'].append({'term': {'replyto.keyword' if not indata.get('author') else 'sender': sourceID}})
+        xquery["query"]["bool"]["must"].append(
+            {
+                "term": {
+                    "replyto.keyword"
+                    if not indata.get("author")
+                    else "sender": sourceID
+                }
+            }
+        )
         xres = session.DB.ES.search(
-            index=session.DB.dbname,
-            doc_type="email",
-            size = 0,
-            body = xquery
+            index=session.DB.dbname, doc_type="email", size=0, body=xquery
         )
         authors = []
-        for person in xres['aggregations']['per_ml']['buckets']:
-            pk = person['key']
+        for person in xres["aggregations"]["per_ml"]["buckets"]:
+            pk = person["key"]
             authors.append(pk)
         if emails > max_emails:
             max_emails = emails
@@ -211,10 +206,14 @@ def run(API, environ, indata, session):
     # Grab the account records for all addresses
     for ID, repo in repos.items():
         mylinks = {}
-        hID = hashlib.sha1( ("%s%s" % (dOrg, ID)).encode('ascii', errors='replace')).hexdigest()
-        if not session.DB.ES.exists(index=session.DB.dbname, doc_type="person", id = hID):
+        hID = hashlib.sha1(
+            ("%s%s" % (dOrg, ID)).encode("ascii", errors="replace")
+        ).hexdigest()
+        if not session.DB.ES.exists(index=session.DB.dbname, doc_type="person", id=hID):
             continue
-        repodatas[ID] = session.DB.ES.get(index=session.DB.dbname, doc_type="person", id = hID)
+        repodatas[ID] = session.DB.ES.get(
+            index=session.DB.dbname, doc_type="person", id=hID
+        )
 
     for ID, repo in repos.items():
         mylinks = {}
@@ -222,47 +221,57 @@ def run(API, environ, indata, session):
             continue
         repodata = repodatas[ID]
         oID = ID
-        if indata.get('collapse'):
-            m = re.search(indata.get('collapse'), repodata['_source']['email'])
+        if indata.get("collapse"):
+            m = re.search(indata.get("collapse"), repodata["_source"]["email"])
             if m:
                 ID = m.group(1)
         xlinks = []
         for xID, xrepo in repos.items():
             if xID in repodatas:
                 xrepodata = repodatas[xID]
-                if indata.get('collapse'):
-                    m = re.search(indata.get('collapse'), xrepodata['_source']['email'])
+                if indata.get("collapse"):
+                    m = re.search(indata.get("collapse"), xrepodata["_source"]["email"])
                     if m:
                         xID = m.group(1)
                 if xID != ID:
 
                     if ID in xrepo:
                         xlinks.append(xID)
-                        lname = "%s||%s" % (ID, xID) # Link name
-                        rname = "%s||%s" % (xID, ID) # Reverse link name
-                        if len(xlinks) > 0 and rname not in repo_links and len(xlinks) >= minLinks:
+                        lname = "%s||%s" % (ID, xID)  # Link name
+                        rname = "%s||%s" % (xID, ID)  # Reverse link name
+                        if (
+                            len(xlinks) > 0
+                            and rname not in repo_links
+                            and len(xlinks) >= minLinks
+                        ):
                             mylinks[ID] = mylinks.get(ID, 0) + 1
-                            repo_links[lname] = repo_links.get(lname, 0) + len(xlinks) # How many contributors in common between project A and B?
+                            repo_links[lname] = repo_links.get(lname, 0) + len(
+                                xlinks
+                            )  # How many contributors in common between project A and B?
                             if repo_links[lname] > max_shared:
                                 max_shared = repo_links[lname]
                         elif rname in repo_links:
                             repo_links[rname] = repo_links.get(rname, 0) + len(xlinks)
         if ID not in repo_notoriety:
             repo_notoriety[ID] = set()
-        repo_notoriety[ID].update(mylinks.keys()) # How many projects is this repo connected to?
+        repo_notoriety[ID].update(
+            mylinks.keys()
+        )  # How many other addresses is this one connected to?
 
         if ID not in repo_authors:
             repo_authors[ID] = set()
-        repo_authors[ID].update(repo) # How many projects is this repo connected to?
+        repo_authors[ID].update(repo)  # Track the unique people interacting with this address
 
         if ID != oID:
             repo_commits[ID] = repo_commits.get(ID, 0) + repo_commits[oID]
             if repo_commits[ID] > max_emails:
-                max_emails = repo_commits[ID] # Used for calculating max link thickness
+                max_emails = repo_commits[ID]  # Used for calculating max link thickness
         if len(repo_notoriety[ID]) > max_links:
             max_links = len(repo_notoriety[ID])
         if len(repo_authors[ID]) > max_authors:
-            max_authors = len(repo_authors[ID]) # Used for calculating max sphere size in charts
+            max_authors = len(
+                repo_authors[ID]
+            )  # Used for calculating max sphere size in charts
 
     # Now, pull it all together!
     nodes = []
@@ -271,45 +280,46 @@ def run(API, environ, indata, session):
     for sourceID, ns in repo_notoriety.items():
         lsize = 0
         for k in repo_links.keys():
-            fr, to = k.split('||')
+            fr, to = k.split("||")
... 5496 lines suppressed ...
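
One detail in the map logic above deserves a note: links between two
addresses are undirected, so the code builds a forward name "A||B" and a
reverse name "B||A" and accumulates weight on whichever direction was seen
first. A minimal sketch of that dedup step (addresses and the helper name
are illustrative):

    repo_links = {}

    def add_link(a, b, weight):
        """Record an undirected link once, mirroring the lname/rname check."""
        lname = "%s||%s" % (a, b)  # link name
        rname = "%s||%s" % (b, a)  # reverse link name
        key = rname if rname in repo_links else lname
        repo_links[key] = repo_links.get(key, 0) + weight

    add_link("alice@example.org", "bob@example.org", 3)
    add_link("bob@example.org", "alice@example.org", 2)
    assert repo_links == {"alice@example.org||bob@example.org": 5}

Splitting on "||" later (fr, to = k.split("||")) recovers both endpoints,
which is why the separator must be a string that cannot occur in an address.
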