merged roels unicsss.py code
This commit is contained in:
parent 30c9247518
commit 6260572237
etherdump (25 lines changed)
@@ -1,4 +1,7 @@
 #!/usr/bin/env python
+# License: AGPL
+#
+
 from __future__ import print_function
 # stdlib
 import json, sys, os, re
@@ -47,9 +50,10 @@ p.add_argument("--showurls", default=False, action="store_true", help="flag to d
 p.add_argument("--hidepaths", default=False, action="store_true", help="flag to not display paths")
 p.add_argument("--pretend", default=False, action="store_true", help="flag to not actually save")
 p.add_argument("--add-images", default=False, action="store_true", help="flag to add image tags")
+p.add_argument("--authors-css", default="authors.css", help="filename to save collected authorship css (nb: etherdump will overwrite this file!)")
 
 # TODO css from pad --- ie specify a padid for a stylesheet!!!!!!
-p.add_argument("--css", default="styles.css", help="padid of stylesheet")
+# p.add_argument("--css", default="styles.css", help="padid of stylesheet")
 
 
 args = p.parse_args()
@@ -77,6 +81,7 @@ todo = args.padid
 done = set()
 count = 0
 data = {}
+authors_css_rules = {}
 data['apikey'] = info['apikey']
 
 if args.allpads:
@@ -245,7 +250,15 @@ while len(todo) > 0:
         # extract the style tag (with authorship colors)
         style = t.find(".//style")
         if style != None:
-            style = ET.tostring(style, method="html")
+            if args.authors_css:
+                for i in style.text.splitlines():
+                    if len(i):
+                        selector, rule = i.split(' ',1)
+                        authors_css_rules[selector] = rule
+                style = '' # strip the individual style tag from each page (only exports to authors-css file)
+                # nb: it's up to the template to refer to the authors-css file
+            else:
+                style = ET.tostring(style, method="html")
         else:
             style = ""
         # and extract the contents of the body
@@ -273,4 +286,10 @@ while len(todo) > 0:
         if args.limit and count >= args.limit:
             break
     except TypeError:
         print ("ERROR, skipping!", file=sys.stderr)
+
+# Write the unified CSS with authors
+if args.authors_css:
+    with open(args.authors_css, 'w') as css:
+        for selector, rule in sorted(authors_css_rules.items()):
+            css.write(selector+' '+rule+'\n')
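
For reference, a minimal standalone sketch of the authors-css flow the hunks above add (illustrative only: collect_author_styles, write_authors_css and the sample selectors are assumptions, not names from etherdump). Each pad's exported style block is split into selector/rule pairs, duplicate selectors from later pads overwrite earlier ones, and everything is written once to the shared file.

# standalone sketch of the unified authors-css collection (not etherdump itself)
authors_css_rules = {}

def collect_author_styles(style_text):
    # assumes each non-empty line of the pad's exported style block has the
    # form "<selector> <rule>", which is what split(' ', 1) relies on above
    for line in style_text.splitlines():
        if len(line):
            selector, rule = line.split(' ', 1)
            authors_css_rules[selector] = rule

def write_authors_css(path="authors.css"):
    # one rule per author selector across all pads, sorted for stable output
    with open(path, 'w') as css:
        for selector, rule in sorted(authors_css_rules.items()):
            css.write(selector + ' ' + rule + '\n')

# made-up selectors, just to show the round trip
collect_author_styles(".a1 {background-color:#ffff99}\n.a2 {background-color:#99ccff}")
write_authors_css("authors.css")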
linkify.py (52 lines changed)
@@ -3,39 +3,47 @@ import re, sys
 
 
 def strip_tags (text):
     return re.sub(r"<.*?>", "", text)
 
 def urlify (t, ext=".html"):
     return t.replace(" ", "_") + ext
 
 def filename_to_padid (t):
     t = t.replace("_", " ")
     t = re.sub(r"\.html$", "", t)
     return t
 
+def normalize_pad_name (n):
+    if '?' in n:
+        n = n.split('?', 1)[0]
+    if '/' in n:
+        n = n.split('/', 1)[0]
+    return n
+
 def linkify (src, urlify=urlify):
 
     collect = []
 
     def s (m):
         contents = strip_tags(m.group(1))
+        contents = normalize_pad_name(contents)
         collect.append(contents)
         link = urlify(contents)
         # link = link.split("?", 1)[0]
         return "[[<a class=\"wikilink\" href=\"{0}\">{1}</a>]]".format(link, contents)
 
     # src = re.sub(r"\[\[([\w_\- ,]+?)\]\]", s, src)
     ## question marks are ignored by etherpad, so split/strip it
     ## strip slashes as well!! (/timeslider)
-    src = re.sub(r"\[\[(.+?)(\/.*)?(\?.*)?\]\]", s, src)
+    src = re.sub(r"\[\[(.+?)\]\]", s, src)
     return (src, collect)
 
 
 if __name__ == "__main__":
     src = sys.stdin.read()
     src, links = linkify(src)
 
     for l in links:
         print (l)
 
     print (src)
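
And a quick usage sketch of the new normalize_pad_name behaviour in linkify (assumes linkify.py is importable; the pad names below are made up):

# usage sketch, assuming linkify.py is on the import path; pad names are made up
from linkify import linkify

src = "see [[mypad?showChat=false]] and [[otherpad/timeslider]]"
html, pads = linkify(src)
print(pads)   # ['mypad', 'otherpad'] -- query string and trailing path are stripped
print(html)   # wikilinks now point at mypad.html and otherpad.html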