You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@kibble.apache.org by hu...@apache.org on 2018/09/17 07:49:59 UTC

[kibble] 01/08: add contributors API point

This is an automated email from the ASF dual-hosted git repository.

humbedooh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kibble.git

commit 75379fbe9e52a4468ef005530a7be4da6cab1d2f
Author: Daniel Gruno <hu...@apache.org>
AuthorDate: Mon Sep 17 09:48:16 2018 +0200

    add contributors API point
---
 api/pages/org/contributors.py | 168 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 168 insertions(+)

diff --git a/api/pages/org/contributors.py b/api/pages/org/contributors.py
new file mode 100644
index 0000000..c42e0f9
--- /dev/null
+++ b/api/pages/org/contributors.py
@@ -0,0 +1,168 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+########################################################################
+# OPENAPI-URI: /api/org/contributors
+########################################################################
+# post:
+#   requestBody:
+#     content:
+#       application/json:
+#         schema:
+#           $ref: '#/components/schemas/defaultWidgetArgs'
+#   responses:
+#     '200':
+#       content:
+#         application/json:
+#           schema:
+#             $ref: '#/components/schemas/contributorList'
+#       description: 200 Response
+#     default:
+#       content:
+#         application/json:
+#           schema:
+#             $ref: '#/components/schemas/Error'
+#       description: unexpected error
+#   security:
+#   - cookieAuth: []
+#   summary: Shows contributors for the entire org or matching filters.
+# 
+########################################################################
+
+
+
+
+
+"""
+This is the contributor list renderer for Kibble
+"""
+
+import json
+import time
+import hashlib
+
+cached_people = {} # Module-level cache of person records keyed by profile hash; persists across requests so repeat lookups skip Elasticsearch.
+
+def run(API, environ, indata, session):
+    
+    # We need to be logged in for this!
+    if not session.user:
+        raise API.exception(403, "You must be logged in to use this API endpoint! %s")
+    
+    
+    # First, fetch the view if we have such a thing enabled
+    viewList = []
+    if indata.get('view'):
+        viewList = session.getView(indata.get('view'))
+    if indata.get('subfilter'):
+        viewList = session.subFilter(indata.get('subfilter'), view = viewList) 
+    
+    
+    # Fetch all contributors for the org
+    dOrg = session.user['defaultOrganisation'] or "apache"
+    query = {
+                'query': {
+                    'bool': {
+                        'must': [
+                            {
+                                'term': {
+                                    'organisation': dOrg
+                                }
+                            }
+                        ]
+                    }
+                }
+            }
+    
+    # Source-specific or view-specific??
+    if indata.get('source'):
+        query['query']['bool']['must'].append({'term': {'sourceID': indata.get('source')}})
+    elif viewList:
+        query['query']['bool']['must'].append({'terms': {'sourceID': viewList}})
+    
+    # Date specific?
+    dateTo = indata.get('to', int(time.time()))
+    dateFrom = indata.get('from', dateTo - (86400*30*6)) # Default to a 6 month span
+    query['query']['bool']['must'].append(
+        {'range':
+            {
+                'ts': {
+                    'from': dateFrom,
+                    'to': dateTo
+                }
+            }
+        }
+        )
+    emails = []
+    contribs = {}
+    
+    for field in ['email', 'address', 'author_email', 'assignee', 'issueCreator', 'issueCloser']:
+        N = 0
+        while N < 5:
+            query['aggs'] = {
+                'by_id': {
+                    'terms': {
+                        'field': field,
+                        'size': 10000,
+                        'include': {
+                            'partition': N,
+                            'num_partitions': 5
+                         },
+                    }                
+                }
+            }
+            res = session.DB.ES.search(
+                    index=session.DB.dbname,
+                    doc_type="*",
+                    size = 0,
+                    body = query
+                )
+            # Break if we've found nothing more
+            #if len(res['aggregations']['by_id']['buckets']) == 0:
+                #break
+            # otherwise, add 'em to the pile
+            for k in res['aggregations']['by_id']['buckets']:
+                if k['key'] not in emails:
+                    emails.append(k['key'])
+                    contribs[k['key']] = k['doc_count']
+            N += 1
+            
+    people = []
+    for email in emails:
+        pid = hashlib.sha1( ("%s%s" % (dOrg, email)).encode('ascii', errors='replace')).hexdigest()
+        person = None
+        if pid in cached_people:
+            person = cached_people[pid]
+        else:
+            try:
+                doc = session.DB.ES.get(index=session.DB.dbname, doc_type = 'person', id = pid)
+                cached_people[pid] = {
+                    'name': doc['_source']['name'],
+                    'email': doc['_source']['email'],
+                    'gravatar': hashlib.md5( email.encode('ascii', errors = 'replace')).hexdigest()
+                }
+                person = cached_people[pid]
+            except:
+                pass # Couldn't find 'em, booo
+        if person:
+            person['contributions'] = contribs.get(email, 0)
+            people.append(person)
+        
+    JSON_OUT = {
+        'people': people,
+        'okay': True
+    }
+    yield json.dumps(JSON_OUT)