You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@allura.apache.org by tv...@apache.org on 2013/08/08 15:34:24 UTC
[05/50] git commit: [#6139] ticket:399 Skip pages that can't be parsed (non-wiki)
[#6139] ticket:399 Skip pages that can't be parsed (non-wiki)
Project: http://git-wip-us.apache.org/repos/asf/incubator-allura/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-allura/commit/be9d8225
Tree: http://git-wip-us.apache.org/repos/asf/incubator-allura/tree/be9d8225
Diff: http://git-wip-us.apache.org/repos/asf/incubator-allura/diff/be9d8225
Branch: refs/heads/tv/6458
Commit: be9d8225fc07c1021f631b320df78270f8392787
Parents: 24d39a7
Author: Igor Bondarenko <je...@gmail.com>
Authored: Fri Jul 26 08:22:51 2013 +0000
Committer: Tim Van Steenburgh <tv...@gmail.com>
Committed: Tue Jul 30 19:29:23 2013 +0000
----------------------------------------------------------------------
ForgeWiki/forgewiki/scripts/wiki_from_trac/extractors.py | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-allura/blob/be9d8225/ForgeWiki/forgewiki/scripts/wiki_from_trac/extractors.py
----------------------------------------------------------------------
diff --git a/ForgeWiki/forgewiki/scripts/wiki_from_trac/extractors.py b/ForgeWiki/forgewiki/scripts/wiki_from_trac/extractors.py
index ef931b3..0038dd9 100644
--- a/ForgeWiki/forgewiki/scripts/wiki_from_trac/extractors.py
+++ b/ForgeWiki/forgewiki/scripts/wiki_from_trac/extractors.py
@@ -18,6 +18,7 @@
import re
import sys
import json
+import traceback
from urllib import quote, unquote
from urlparse import urljoin, urlsplit
@@ -101,7 +102,14 @@ class WikiExporter(object):
self.options = options
def export(self, out):
- pages = [self.get_page(title) for title in self.page_list()]
+ pages = []
+ for title in self.page_list():
+ try:
+ pages.append(self.get_page(title))
+ except:
+ self.log('Cannot fetch page %s. Skipping' % title)
+ self.log(traceback.format_exc())
+ continue
out.write(json.dumps(pages, indent=2, sort_keys=True))
out.write('\n')