Posted to commits@allura.apache.org by br...@apache.org on 2013/04/24 19:16:20 UTC

[11/28] git commit: [#2835] ticket:292 Refactored wiki search into helper

[#2835] ticket:292 Refactored wiki search into helper


Project: http://git-wip-us.apache.org/repos/asf/incubator-allura/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-allura/commit/891ff450
Tree: http://git-wip-us.apache.org/repos/asf/incubator-allura/tree/891ff450
Diff: http://git-wip-us.apache.org/repos/asf/incubator-allura/diff/891ff450

Branch: refs/heads/db/2835
Commit: 891ff4503a5d1e0c951405ac0bde7a5fd1bfc94d
Parents: 1f5a295
Author: Igor Bondarenko <je...@gmail.com>
Authored: Mon Mar 25 11:56:00 2013 +0000
Committer: Dave Brondsema <db...@slashdotmedia.com>
Committed: Wed Apr 24 16:34:41 2013 +0000

----------------------------------------------------------------------
 Allura/allura/lib/search.py      |  103 +++++++++++++++++++++++++++++++++
 ForgeWiki/forgewiki/wiki_main.py |  100 +++++---------------------------
 2 files changed, 117 insertions(+), 86 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-allura/blob/891ff450/Allura/allura/lib/search.py
----------------------------------------------------------------------
diff --git a/Allura/allura/lib/search.py b/Allura/allura/lib/search.py
index d9c300a..c541225 100644
--- a/Allura/allura/lib/search.py
+++ b/Allura/allura/lib/search.py
@@ -18,12 +18,17 @@
 import re
 import socket
 from logging import getLogger
+from urllib import urlencode
+from itertools import imap
 
 import markdown
 import jinja2
+from tg import redirect, url
 from pylons import tmpl_context as c, app_globals as g
+from pylons import request
 from pysolr import SolrError
 
+from allura.lib import helpers as h
 from .markdown_extensions import ForgeExtension
 
 log = getLogger(__name__)
@@ -83,6 +88,104 @@ def search_artifact(atype, q, history=False, rows=10, short_timeout=False, **kw)
         fq.append('is_history_b:False')
     return search(q, fq=fq, rows=rows, short_timeout=short_timeout, ignore_errors=False, **kw)
 
+
+def search_app(q='', fq=None, **kw):
+    """Helper for app search.
+
+    Uses dismax query parser. Matches on `title` and `text`. Handles paging, sorting, etc
+    """
+    history = kw.pop('history', None)
+    if kw.pop('project', False):
+        redirect(c.project.url() + 'search?' + urlencode(dict(q=q, history=history)))
+    search_comments = kw.pop('search_comments', None)
+    limit = kw.pop('limit', None)
+    page = kw.pop('page', 0)
+    default = kw.pop('default', 25)
+    allowed_types = kw.pop('allowed_types', [])
+    parser = kw.pop('parser', None)
+    sort = kw.pop('sort', 'score desc')
+    fq = fq if fq else []
+    search_error = None
+    results = []
+    count = 0
+    matches = {}
+    limit, page, start = g.handle_paging(limit, page, default=default)
+    if not q:
+        q = ''
+    else:
+        # Match on both `title` and `text` by default, using 'dismax' parser.
+        # Score on `title` matches is boosted, so title match is better than body match.
+        # It's 'fuzzier' than standard parser, which matches only on `text`.
+        if search_comments:
+            allowed_types += ['Post']
+        search_params = {
+            'qt': 'dismax',
+            'qf': 'title^2 text',
+            'pf': 'title^2 text',
+            'fq': [
+                'project_id_s:%s'  % c.project._id,
+                'mount_point_s:%s' % c.app.config.options.mount_point,
+                '-deleted_b:true',
+                'type_s:(%s)' % ' OR '.join(['"%s"' % t for t in allowed_types])
+            ] + fq,
+            'hl': 'true',
+            'hl.simple.pre': '<strong>',
+            'hl.simple.post': '</strong>',
+            'sort': sort,
+        }
+        if not history:
+           search_params['fq'].append('is_history_b:False')
+        if parser == 'standard':
+            search_params.pop('qt', None)
+            search_params.pop('qf', None)
+            search_params.pop('pf', None)
+        try:
+            results = search(
+                q, short_timeout=True, ignore_errors=False,
+                rows=limit, start=start, **search_params)
+        except SearchError as e:
+            search_error = e
+        if results:
+            count = results.hits
+            matches = results.highlighting
+            def historize_urls(doc):
+                if doc.get('type_s', '').endswith(' Snapshot'):
+                    if doc.get('url_s'):
+                        doc['url_s'] = doc['url_s'] + '?version=%s' % doc.get('version_i')
+                return doc
+            def add_matches(doc):
+                m = matches.get(doc['id'], {})
+                doc['title_match'] = h.get_first(m, 'title')
+                doc['text_match'] = h.get_first(m, 'text')
+                if not doc['text_match']:
+                    doc['text_match'] = h.get_first(doc, 'text')
+                return doc
+            results = imap(historize_urls, results)
+            results = imap(add_matches, results)
+
+    # Provide sort urls to the view
+    score_url = 'score desc'
+    date_url = 'mod_date_dt desc'
+    try:
+        field, order = sort.split(' ')
+    except ValueError:
+        field, order = 'score', 'desc'
+    sort = ' '.join([field, 'asc' if order == 'desc' else 'desc'])
+    if field == 'score':
+        score_url = sort
+    elif field == 'mod_date_dt':
+        date_url = sort
+    params = request.GET.copy()
+    params.update({'sort': score_url})
+    score_url = url(request.path, params=params)
+    params.update({'sort': date_url})
+    date_url = url(request.path, params=params)
+    return dict(q=q, history=history, results=results or [],
+                count=count, limit=limit, page=page, search_error=search_error,
+                sort_score_url=score_url, sort_date_url=date_url,
+                sort_field=field)
+
+
 def find_shortlinks(text):
     md = markdown.Markdown(
         extensions=['codehilite', ForgeExtension(), 'tables'],

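To make the intended reuse concrete, here is a minimal sketch (not part of this commit) of how another tool controller could call the new search_app() helper. The function name and the 'BlogPost' artifact types below are illustrative assumptions only; the real wiki caller appears in the wiki_main.py hunk that follows.

    # Illustrative sketch only -- mirrors the wiki controller change below.
    from allura.lib.search import search_app

    def tool_search_view(q=None, history=None, search_comments=None,
                         project=None, limit=None, page=0, **kw):
        kw.update(dict(
            q=q or '',
            history=history,
            search_comments=search_comments,
            project=project,
            limit=limit,
            page=page,
            allowed_types=['BlogPost', 'BlogPost Snapshot'],  # assumed artifact types
        ))
        # search_app() returns everything the search template needs:
        # q, history, results, count, limit, page, search_error,
        # sort_score_url, sort_date_url and sort_field.
        return search_app(**kw)
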
http://git-wip-us.apache.org/repos/asf/incubator-allura/blob/891ff450/ForgeWiki/forgewiki/wiki_main.py
----------------------------------------------------------------------
diff --git a/ForgeWiki/forgewiki/wiki_main.py b/ForgeWiki/forgewiki/wiki_main.py
index eed1467..b2dbee7 100644
--- a/ForgeWiki/forgewiki/wiki_main.py
+++ b/ForgeWiki/forgewiki/wiki_main.py
@@ -18,12 +18,11 @@
 #-*- python -*-
 import logging
 from pprint import pformat
-from urllib import urlencode, unquote
+from urllib import unquote
 from datetime import datetime
-from itertools import imap
 
 # Non-stdlib imports
-from tg import expose, validate, redirect, response, flash, url
+from tg import expose, validate, redirect, response, flash
 from tg.decorators import with_trailing_slash, without_trailing_slash
 from tg.controllers import RestController
 from pylons import tmpl_context as c, app_globals as g
@@ -37,7 +36,7 @@ import jinja2
 from allura import model as M
 from allura.lib import helpers as h
 from allura.app import Application, SitemapEntry, DefaultAdminController
-from allura.lib.search import search, SolrError
+from allura.lib.search import search_app
 from allura.lib.decorators import require_post, Property
 from allura.lib.security import require_access, has_access
 from allura.controllers import AppDiscussionController, BaseController
@@ -320,90 +319,19 @@ class RootController(BaseController, DispatchIndex):
                    project=validators.StringBool(if_empty=False)))
     def search(self, q=None, history=None, search_comments=None, project=None, limit=None, page=0, **kw):
         'local wiki search'
-        if project:
-            redirect(c.project.url() + 'search?' + urlencode(dict(q=q, history=history)))
-        search_error = None
-        results = []
-        count = 0
-        parser = kw.pop('parser', None)
-        sort = kw.pop('sort', 'score desc')
-        matches = {}
-        limit, page, start = g.handle_paging(limit, page, default=25)
-        if not q:
-            q = ''
-        else:
-            # Match on both `title` and `text` by default, using 'dismax' parser.
-            # Score on `title` matches is boosted, so title match is better than body match.
-            # It's 'fuzzier' than standard parser, which matches only on `text`.
-            allowed_types = ['WikiPage', 'WikiPage Snapshot']
-            if search_comments:
-                allowed_types += ['Post']
-            search_params = {
-                'qt': 'dismax',
-                'qf': 'title^2 text',
-                'pf': 'title^2 text',
-                'fq': [
-                    'project_id_s:%s'  % c.project._id,
-                    'mount_point_s:%s' % c.app.config.options.mount_point,
-                    '-deleted_b:true',
-                    'type_s:(%s)' % ' OR '.join(['"%s"' % t for t in allowed_types])
-                ],
-                'hl': 'true',
-                'hl.simple.pre': '<strong>',
-                'hl.simple.post': '</strong>',
-                'sort': sort,
-            }
-            if not history:
-               search_params['fq'].append('is_history_b:False')
-            if parser == 'standard':
-                search_params.pop('qt', None)
-                search_params.pop('qf', None)
-                search_params.pop('pf', None)
-            try:
-                results = search(
-                    q, short_timeout=True, ignore_errors=False,
-                    rows=limit, start=start, **search_params)
-            except SolrError as e:
-                search_error = e
-            if results:
-                count = results.hits
-                matches = results.highlighting
-                def historize_urls(doc):
-                    if doc.get('type_s', '').endswith(' Snapshot'):
-                        if doc.get('url_s'):
-                            doc['url_s'] = doc['url_s'] + '?version=%s' % doc.get('version_i')
-                    return doc
-                def add_matches(doc):
-                    m = matches.get(doc['id'], {})
-                    doc['title_match'] = h.get_first(m, 'title')
-                    doc['text_match'] = h.get_first(m, 'text')
-                    if not doc['text_match']:
-                        doc['text_match'] = h.get_first(doc, 'text')
-                    return doc
-                results = imap(historize_urls, results)
-                results = imap(add_matches, results)
         c.search_results = W.search_results
         c.help_modal = W.help_modal
-        score_url = 'score desc'
-        date_url = 'mod_date_dt desc'
-        try:
-            field, order = sort.split(' ')
-        except ValueError:
-            field, order = 'score', 'desc'
-        sort = ' '.join([field, 'asc' if order == 'desc' else 'desc'])
-        if field == 'score':
-            score_url = sort
-        elif field == 'mod_date_dt':
-            date_url = sort
-        params = request.GET.copy()
-        params.update({'sort': score_url})
-        score_url = url(request.path, params=params)
-        params.update({'sort': date_url})
-        date_url = url(request.path, params=params)
-        return dict(q=q, history=history, results=results or [],
-                    count=count, limit=limit, page=page, search_error=search_error,
-                    sort_score_url=score_url, sort_date_url=date_url,
-                    sort_field=field)
+        search_params = kw
+        search_params.update({
+            'q': q or '',
+            'history': history,
+            'search_comments': search_comments,
+            'project': project,
+            'limit': limit,
+            'page': page,
+            'allowed_types': ['WikiPage', 'WikiPage Snapshot'],
+        })
+        return search_app(**search_params)
 
     @with_trailing_slash
     @expose('jinja:forgewiki:templates/wiki/browse.html')
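
As a quick illustration of the sort-toggle logic that moved into search_app() (a sketch, not part of the commit): the helper flips the direction of whichever sort field is currently active, so the view can render "sort by relevance" and "sort by date" links that reverse order on the next click.

    # Sketch of the toggle inside search_app(); values are examples only.
    sort = 'score desc'                      # current request's sort
    field, order = sort.split(' ')           # -> ('score', 'desc')
    toggled = ' '.join([field, 'asc' if order == 'desc' else 'desc'])
    # toggled == 'score asc': sort_score_url points at ?sort=score+asc,
    # while sort_date_url keeps the default 'mod_date_dt desc'.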