You are viewing a plain text version of this content; the canonical HTML version is available at the original archive link.
Posted to commits@openwebbeans.apache.org by wa...@apache.org on 2021/06/07 18:34:01 UTC

[openwebbeans-site] 05/16: refactor - show content change

This is an automated email from the ASF dual-hosted git repository.

wave pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/openwebbeans-site.git

commit 4b687fc4a71c92c85d8889a4802730cfc17596a8
Author: Dave Fisher <da...@davefisher.tech>
AuthorDate: Mon Jun 7 09:40:06 2021 -0700

    refactor - show content change
---
 theme/plugins/asfdata.py | 22 +++++++++++++++-------
 1 file changed, 15 insertions(+), 7 deletions(-)

diff --git a/theme/plugins/asfdata.py b/theme/plugins/asfdata.py
index 7b72ee2..ae98d23 100644
--- a/theme/plugins/asfdata.py
+++ b/theme/plugins/asfdata.py
@@ -352,6 +352,20 @@ def get_element_text(entry, child):
     return get_node_text(elements[0].childNodes)
 
 
+# truncate HTML text to the given number of words, then repair/normalize the resulting markup.
+def truncate_words(text, words):
+    content_text = ' '.join(text.split(' ')[:words]) + "..."
+    print(content_text)
+    for regex, replace in FIXUP_HTML:
+        m = regex.search(content_text)
+        if m:
+            content_text = re.sub(regex, replace, content_text)
+    tree_soup = BeautifulSoup(content_text, 'html.parser')
+    content_text = tree_soup.decode(formatter='html')
+    print(content_text)
+    return content_text
+
+
 # retrieve blog posts from an Atom feed.
 def process_blog(feed, count, words, debug):
     print(f'blog feed: {feed}')
@@ -368,13 +382,7 @@ def process_blog(feed, count, words, debug):
         # we may want content
         content_text = ''
         if words:
-            content_text = ' '.join(get_element_text(entry, 'content').split(' ')[:words]) + "..."
-            for regex, replace in FIXUP_HTML:
-                m = regex.search(content_text)
-                if m:
-                    content_text = re.sub(regex, replace, content_text)
-            tree_soup = BeautifulSoup(content_text, 'html.parser')
-            content_text = tree_soup.decode(formatter='html')
+            content_text = truncate_words(get_element_text(entry, 'content'), words)
         # we want the title and href
         v.append(
             {