@@ -101,171 +101,176 @@ while len(todo) > 0:
    except OSError:
        pass

    try:

        #                  _
        #  _ __ ___   ___| |_ __ _
        # | '_ ` _ \ / _ \ __/ _` |
        # | | | | | |  __/ || (_| |
        # |_| |_| |_|\___|\__\__,_|
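
        # metadata pass: collect the revision count, last-edited time and
        # author ids from the API and save them next to the dumps as JSON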
        meta_out = "{0}/{1}".format(args.path, urlify(padid, ext=".json"))
        if not args.hidepaths:
            print (meta_out, file=sys.stderr)
        if not args.pretend:
            meta = {}
            meta['padid'] = padid
            revisions_url = apiurl+'getRevisionsCount?'+urlencode(data)
            if args.showurls:
                print (revisions_url, file=sys.stderr)
            meta['total_revisions'] = json.load(urlopen(revisions_url))['data']['revisions']

            lastedited_url = apiurl+'getLastEdited?'+urlencode(data)
            if args.showurls:
                print (lastedited_url, file=sys.stderr)
            lastedited_raw = json.load(urlopen(lastedited_url))['data']['lastEdited']
            meta['lastedited_raw'] = lastedited_raw
            meta['lastedited'] = datetime.fromtimestamp(int(lastedited_raw)/1000).isoformat()
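            # (lastEdited is in milliseconds since the epoch, hence the /1000
            # before converting to a local ISO 8601 timestamp)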

            # author_ids (unfortunately, this is a list of internal etherpad author ids -- not the names ppl type)
            authors_url = apiurl+'listAuthorsOfPad?'+urlencode(data)
            if args.showurls:
                print (authors_url, file=sys.stderr)
            meta['author_ids'] = json.load(urlopen(authors_url))['data']['authorIDs']

            with open(meta_out, "w") as f:
                json.dump(meta, f)

        #  _ __ __ ___      __
        # | '__/ _` \ \ /\ / /
        # | | | (_| |\ V  V /
        # |_|  \__,_| \_/\_/
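
        # raw text pass: fetch the pad's plain-text contents with getText and
        # write them out as .txt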
        raw_out = "{0}/{1}".format(args.path, urlify(padid, ext=".txt"))
        if not args.hidepaths:
            print (raw_out, file=sys.stderr)
        text_url = apiurl+"getText?"+urlencode(data)
        if args.showurls:
            print (text_url, file=sys.stderr)
        if not args.pretend:
            rawText = json.load(urlopen(text_url))['data']['text']
            with open(raw_out, "w") as f:
                f.write(rawText.encode("utf-8"))

        #  _     _             _
        # | |__ | |_ _ __ ___ | |
        # | '_ \| __| '_ ` _ \| |
        # | | | | |_| | | | | | |
        # |_| |_|\__|_| |_| |_|_|

        # todo ? -- regular HTML output

        #            _
        #   ___ ___ | | ___  _ __ ___
        #  / __/ _ \| |/ _ \| '__/ __|
        # | (_| (_) | | (_) | |  \__ \
        #  \___\___/|_|\___/|_|  |___/
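
        # colors pass: createDiffHTML with startRev=0 diffs against the first
        # revision, returning the full pad as HTML with per-author color spans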
        colors_out = "{0}/{1}".format(args.path, urlify(padid, ext=".html"))
        if not args.hidepaths:
            print (colors_out, file=sys.stderr)
        data['startRev'] = "0"
        colors_url = apiurl+'createDiffHTML?'+urlencode(data)
        if args.showurls:
            print (colors_url, file=sys.stderr)
        html = json.load(urlopen(colors_url))['data']['html']
        t = html5lib.parse(html, namespaceHTMLElements=False)
        trim_removed_spans(t)
        html = ET.tostring(t, method="html")

        # Stage 1: Process as text
        # Process [[wikilink]] style links
        # and (optionally) add linked page names to spider todo list
        html, links = linkify(html)
        if args.spider:
            for l in links:
                if l not in todo and l not in done:
                    if l.startswith("http://") or l.startswith("https://"):
                        print ("Ignoring absolute URL in [[ link ]] form", file=sys.stderr)
                        continue
                    # if args.verbose:
                    #     print ("  link: {0}".format(l), file=sys.stderr)
                    todo.append(l)

        # Stage 2: Process as ElementTree
        #
        t = html5lib.parse(html, namespaceHTMLElements=False)
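        # (re-parse: linkify above worked on the serialised markup, while the
        # rewrites below operate on the element tree)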
        # apply linkpats
        for a in t.findall(".//a"):
            href = a.attrib.get("href")
            original_href = href
            if href:
                # if args.verbose:
                #     print ("searching for PADLINK: {0}".format(href))
                for pat in padlinkpats:
                    if re.search(pat, href) != None:
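                        # a pad-to-pad link: rewrite it to point at the local
                        # .html dump and queue the target pad for spidering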
                        # if args.verbose:
                        #     print ("  found PADLINK: {0}".format(href))
                        href = re.sub(pat, "\\1.html", href)
                        padid = filename_to_padid(href)
                        set_text_contents(a, "[[{0}]]".format(padid))
                        if padid not in todo and padid not in done:
                            if args.verbose:
                                print ("  link: {0}".format(padid), file=sys.stderr)
                            todo.append(padid)

                # apply linkpats
                for s, r in linkpats:
                    href = re.sub(s, r, href)
                if href != original_href:
                    old_contents = text_contents(a)
                    # print ("OLD_CONTENTS {0}".format(old_contents))
                    if old_contents == original_href:
                        if args.verbose:
                            print ("  Updating href IN TEXT", file=sys.stderr)
                        set_text_contents(a, href)

                if original_href != href:
                    if args.verbose:
                        print ("  Changed href from {0} to {1}".format(original_href, href), file=sys.stderr)
                    a.attrib['href'] = href

                # SHOWIMAGES : inject img tag for (local) images
                if args.add_images:
                    ext = os.path.splitext(href)[1].lower().lstrip(".")
                    if ext in ("png", "gif", "jpeg", "jpg"):
                        # ap = _parent(a)
                        print ("Adding img '{0}'".format(href), file=sys.stderr)
                        img = ET.SubElement(a, "img")
                        br = ET.SubElement(a, "br")
                        a.remove(img); a.insert(0, img)
                        a.remove(br); a.insert(1, br)
                        img.attrib['src'] = href

        # extract the style tag (with authorship colors)
        style = t.find(".//style")
        if style != None:
            style = ET.tostring(style, method="html")
        else:
            style = ""
        # and extract the contents of the body
        html = contents(t.find(".//body"))
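
        # render the final page: pad body plus the extracted authorship
        # styles, revision count, padid and a generation timestamp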
        if not args.pretend:
            with open(colors_out, "w") as f:
                # f.write(html.encode("utf-8"))
                f.write(colors_template.render(
                    html = html,
                    style = style,
                    revision = meta['total_revisions'],
                    padid = padid,
                    timestamp = datetime.now()
                ).encode("utf-8"))

        #  _
        # | | ___   ___  _ __
        # | |/ _ \ / _ \| '_ \
        # | | (_) | (_) | |_) |
        # |_|\___/ \___/| .__/
        #               |_|
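
        # loop bookkeeping: stop once args.limit pads have been processed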
        count += 1
        if args.limit and count >= args.limit:
            break

    except TypeError:
        print ("ERROR, skipping!", file=sys.stderr)