
python3

add-quote-import
Michael Murtaugh, 6 years ago
commit 8df9c56ac1
Changed files (lines changed):

  1. README.md (8)
  2. bin/etherdump (11)
  3. etherdump/commands/common.py (40)
  4. etherdump/commands/creatediffhtml.py (9)
  5. etherdump/commands/deletepad.py (9)
  6. etherdump/commands/dumpcsv.py (1)
  7. etherdump/commands/gethtml.py (9)
  8. etherdump/commands/gettext.py (23)
  9. etherdump/commands/html5tidy.py (1)
  10. etherdump/commands/index.py (1)
  11. etherdump/commands/init.py (32)
  12. etherdump/commands/join.py (5)
  13. etherdump/commands/list.py (24)
  14. etherdump/commands/listauthors.py (9)
  15. etherdump/commands/pull.py (47)
  16. etherdump/commands/revisionscount.py (7)
  17. etherdump/commands/sethtml.py (66)
  18. etherdump/commands/settext.py (18)
  19. etherdump/commands/showmeta.py (3)
  20. etherdump/commands/status.py (1)
  21. setup.py (4)

README.md (8)

@@ -66,3 +66,11 @@ Originally designed for use at: [constant](http://etherdump.constantvzw.org/).
 -----------------------------------------------
 Preparations for [Machine Research](https://machineresearch.wordpress.com/) [2](http://constantvzw.org/site/Machine-Research,2646.html)
+
+6 Oct 2017
+----------------------
+Feature request from PW: When deleting a previously public document, generate a page / pages with an explanation (along the lines of "This document was previously public but has been marked .... maybe give links to search").
+
+3 Nov 2017
+---------------
+machineresearch seems to be __NOPUBLISH__ but still exists (also in recentchanges)

bin/etherdump (11)

@@ -1,5 +1,6 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
+from __future__ import print_function
 import sys
 usage = """Usage:
@@ -31,13 +32,13 @@ try:
     else:
         args = sys.argv[2:]
 except IndexError:
-    print usage
+    print (usage)
     sys.exit(0)
 try:
     # http://stackoverflow.com/questions/301134/dynamic-module-import-in-python
     cmdmod = __import__("etherdump.commands.%s" % cmd, fromlist=["etherdump.commands"])
     cmdmod.main(args)
-except ImportError, e:
-    print "Error performing command '{0}'\n(python said: {1})\n".format(cmd, e)
-    print usage
+except ImportError as e:
+    print ("Error performing command '{0}'\n(python said: {1})\n".format(cmd, e))
+    print (usage)
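
The dispatcher above resolves a subcommand name to a module at runtime with __import__; the fromlist argument makes it return the submodule itself rather than the top-level package. A minimal standalone sketch of that lookup (not part of the commit; it assumes the etherdump package is installed and uses the existing "list" subcommand):

# Sketch of the dynamic dispatch in bin/etherdump: map "etherdump <cmd> ..."
# to etherdump.commands.<cmd>.main(args).
cmd = "list"                 # any module under etherdump/commands/
args = ["--showurl"]         # arguments passed through to that module's main()
try:
    # without fromlist, __import__ would return the top-level "etherdump" package
    cmdmod = __import__("etherdump.commands.%s" % cmd, fromlist=["etherdump.commands"])
    cmdmod.main(args)
except ImportError as e:
    print("Error performing command '{0}'\n(python said: {1})\n".format(cmd, e))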

etherdump/commands/common.py (40)

@@ -1,10 +1,22 @@
 from __future__ import print_function
 import re, os, json, sys
-from urllib import quote_plus, unquote_plus
 from math import ceil, floor
-from urllib2 import urlopen, HTTPError
 from time import sleep
+try:
+    # python2
+    from urlparse import urlparse, urlunparse
+    from urllib2 import urlopen, URLError, HTTPError
+    from urllib import urlencode
+    from urllib import quote_plus, unquote_plus
+    from htmlentitydefs import name2codepoint
+    input = raw_input
+except ImportError:
+    # python3
+    from urllib.parse import urlparse, urlunparse, urlencode, quote_plus, unquote_plus
+    from urllib.request import urlopen, URLError, HTTPError
+    from html.entities import name2codepoint
 groupnamepat = re.compile(r"^g\.(\w+)\$")
 def splitpadname (padid):
@@ -17,15 +29,19 @@ def splitpadname (padid):
 def padurl (padid, ):
     return padid
-def padpath (padid, pub_path=u"", group_path=u""):
+def padpath (padid, pub_path=u"", group_path=u"", normalize=False):
     g, p = splitpadname(padid)
-    if type(g) == unicode:
-        g = g.encode("utf-8")
-    if type(p) == unicode:
-        p = p.encode("utf-8")
+    # if type(g) == unicode:
+    #     g = g.encode("utf-8")
+    # if type(p) == unicode:
+    #     p = p.encode("utf-8")
     p = quote_plus(p)
-    # p = p.replace(" ", "_")
-    # p = p.replace("*", "-")
+    if normalize:
+        p = p.replace(" ", "_")
+        p = p.replace("(", "")
+        p = p.replace(")", "")
+        p = p.replace("?", "")
+        p = p.replace("'", "")
     if g:
         return os.path.join(group_path, g, p)
     else:
@@ -49,6 +65,7 @@ def getjson (url, max_retry=3, retry_sleep_time=0.5):
         try:
             f = urlopen(url)
             data = f.read()
+            data = data.decode("utf-8")
             rurl = f.geturl()
             f.close()
             ret.update(json.loads(data))
@@ -77,13 +94,12 @@ def progressbar (i, num, label="", file=sys.stderr):
     bars = int(ceil(p*20))
     bar = ("*"*bars) + ("-"*(20-bars))
     msg = u"\r{0} {1}/{2} {3}... ".format(bar, (i+1), num, label)
-    sys.stderr.write(msg.encode("utf-8"))
+    sys.stderr.write(msg)
     sys.stderr.flush()
 # Python developer Fredrik Lundh (author of elementtree, among other things) has such a function on his website, which works with decimal, hex and named entities:
-import re, htmlentitydefs
 ##
 # Removes HTML or XML character references and entities from a text string.
 #
@@ -104,7 +120,7 @@ def unescape(text):
         else:
             # named entity
             try:
-                text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
+                text = unichr(name2codepoint[text[1:-1]])
             except KeyError:
                 pass
         return text # leave as is
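
The try/except block added to common.py is the usual way to keep one codebase importable under both Python 2 and 3: attempt the Python 2 module names first, and fall back to the Python 3 locations when the ImportError fires. A minimal standalone sketch of the same shim (not part of the commit; it uses only the module and function names that appear in the diff above):

# Sketch of the py2/py3 compatibility shim used in common.py:
# try the Python 2 module names, fall back to Python 3 on ImportError.
try:
    # python2
    from urllib import quote_plus, unquote_plus, urlencode
    from urllib2 import urlopen, URLError, HTTPError
    from htmlentitydefs import name2codepoint
    input = raw_input  # make input() behave like Python 3's input()
except ImportError:
    # python3
    from urllib.parse import quote_plus, unquote_plus, urlencode
    from urllib.request import urlopen, URLError, HTTPError
    from html.entities import name2codepoint

# Either way, the rest of the module can call urlopen()/quote_plus() and
# look up entities via name2codepoint without any version checks.
print(quote_plus("pad name with spaces"))  # -> pad+name+with+spaces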

etherdump/commands/creatediffhtml.py (9)

@@ -1,5 +1,4 @@
-#!/usr/bin/env python
+from __future__ import print_function
 from argparse import ArgumentParser
 import json
 from urllib import urlencode
@@ -27,13 +26,13 @@ def main(args):
         data['rev'] = args.rev
     requesturl = apiurl+'createDiffHTML?'+urlencode(data)
     if args.showurl:
-        print requesturl
+        print (requesturl)
     else:
         try:
             results = json.load(urlopen(requesturl))['data']
             if args.format == "json":
-                print json.dumps(results)
+                print (json.dumps(results))
             else:
-                print results['html'].encode("utf-8")
+                print (results['html'].encode("utf-8"))
         except HTTPError as e:
             pass

etherdump/commands/deletepad.py (9)

@@ -1,5 +1,4 @@
-#!/usr/bin/env python
+from __future__ import print_function
 from argparse import ArgumentParser
 import json
 from urllib import urlencode
@@ -23,11 +22,11 @@ def main(args):
     data['padID'] = args.padid # is utf-8 encoded
     requesturl = apiurl+'deletePad?'+urlencode(data)
     if args.showurl:
-        print requesturl
+        print (requesturl)
     else:
         results = json.load(urlopen(requesturl))
         if args.format == "json":
-            print json.dumps(results)
+            print (json.dumps(results))
         else:
             if results['data']:
-                print results['data']['text'].encode("utf-8")
+                print (results['data']['text'].encode("utf-8"))

etherdump/commands/dumpcsv.py (1)

@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 from __future__ import print_function
 from argparse import ArgumentParser
 import sys, json, re

etherdump/commands/gethtml.py (9)

@@ -1,5 +1,4 @@
-#!/usr/bin/env python
+from __future__ import print_function
 from argparse import ArgumentParser
 import json
 from urllib import urlencode
@@ -26,10 +25,10 @@ def main(args):
         data['rev'] = args.rev
     requesturl = apiurl+'getHTML?'+urlencode(data)
     if args.showurl:
-        print requesturl
+        print (requesturl)
     else:
         results = json.load(urlopen(requesturl))['data']
         if args.format == "json":
-            print json.dumps(results)
+            print (json.dumps(results))
         else:
-            print results['html'].encode("utf-8")
+            print (results['html'].encode("utf-8"))

etherdump/commands/gettext.py (23)

@@ -1,9 +1,14 @@
-#!/usr/bin/env python
+from __future__ import print_function
 from argparse import ArgumentParser
 import json, sys
-from urllib import urlencode
-from urllib2 import urlopen, HTTPError, URLError
+try:
+    # python2
+    from urllib2 import urlopen, URLError, HTTPError
+    from urllib import urlencode
+except ImportError:
+    # python3
+    from urllib.parse import urlencode
+    from urllib.request import urlopen, URLError, HTTPError
 def main(args):
@@ -26,11 +31,13 @@ def main(args):
         data['rev'] = args.rev
     requesturl = apiurl+'getText?'+urlencode(data)
     if args.showurl:
-        print requesturl
+        print (requesturl)
     else:
-        results = json.load(urlopen(requesturl))
+        resp = urlopen(requesturl).read()
+        resp = resp.decode("utf-8")
+        results = json.loads(resp)
         if args.format == "json":
-            print json.dumps(results)
+            print (json.dumps(results))
         else:
             if results['data']:
-                sys.stdout.write(results['data']['text'].encode("utf-8"))
+                sys.stdout.write(results['data']['text'])

etherdump/commands/html5tidy.py (1)

@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 from __future__ import print_function
 from html5lib import parse
 import os, sys

etherdump/commands/index.py (1)

@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 from __future__ import print_function
 from argparse import ArgumentParser
 import sys, json, re, os, urlparse

etherdump/commands/init.py (32)

@@ -1,8 +1,17 @@
 from __future__ import print_function
 from argparse import ArgumentParser
-from urlparse import urlparse, urlunparse
-from urllib2 import urlopen, URLError, HTTPError
-from urllib import urlencode
+try:
+    # python2
+    from urlparse import urlparse, urlunparse
+    from urllib2 import urlopen, URLError, HTTPError
+    from urllib import urlencode
+    input = raw_input
+except ImportError:
+    # python3
+    from urllib.parse import urlparse, urlunparse, urlencode
+    from urllib.request import urlopen, URLError, HTTPError
 import json, os, sys
 def get_api(url, cmd=None, data=None, verbose=False):
@@ -13,7 +22,9 @@ def get_api(url, cmd=None, data=None, verbose=False):
         # data['apikey'] = "7c8faa070c97f83d8f705c935a32d5141f89cbaa2158042fa92e8ddad5dbc5e1"
         if verbose:
             print ("trying", useurl, file=sys.stderr)
-        resp = json.load(urlopen(useurl))
+        resp = urlopen(useurl).read()
+        resp = resp.decode("utf-8")
+        resp = json.loads(resp)
         if "code" in resp and "message" in resp:
             return resp
     except ValueError as e:
@@ -25,10 +36,11 @@ def get_api(url, cmd=None, data=None, verbose=False):
         print (" HTTPError", e, file=sys.stderr)
         if e.code == 401:
             # Unauthorized is how the API responds to an incorrect API key
-            resp = json.load(e)
-            if "code" in resp and "message" in resp:
-                # print ("returning", resp, file=sys.stderr)
-                return resp
+            return {"code": 401, "message": e}
+            # resp = json.load(e)
+            # if "code" in resp and "message" in resp:
+            #     # print ("returning", resp, file=sys.stderr)
+            #     return resp
 def tryapiurl (url, verbose=False):
     """
@@ -100,7 +112,7 @@ def main(args):
         if apiurl:
             # print ("Got APIURL: {0}".format(apiurl))
             break
-        apiurl = raw_input("Please type the URL of the etherpad: ").strip()
+        apiurl = input("Please type the URL of the etherpad: ").strip()
     padinfo["apiurl"] = apiurl
     apikey = args.apikey
     while True:
@@ -112,7 +124,7 @@ def main(args):
         else:
             print ("bad")
         print ("The APIKEY is the contents of the file APIKEY.txt in the etherpad folder", file=sys.stderr)
-        apikey = raw_input("Please paste the APIKEY: ").strip()
+        apikey = input("Please paste the APIKEY: ").strip()
     padinfo["apikey"] = apikey
     with open(padinfopath, "w") as f:

etherdump/commands/join.py (5)

@@ -1,5 +1,4 @@
-#!/usr/bin/env python
+from __future__ import print_function
 from argparse import ArgumentParser
 import json, os, re
 from urllib import urlencode
@@ -41,5 +40,5 @@ def main(args):
         pass
     for i in items:
         newloc = os.path.join(itembase, i)
-        print "'{0}' => '{1}'".format(i, newloc)
+        print ("'{0}' => '{1}'".format(i, newloc))
         os.rename(i, newloc)

etherdump/commands/list.py (24)

@@ -1,10 +1,18 @@
-#!/usr/bin/env python
+from __future__ import print_function
 from argparse import ArgumentParser
 import json
-from urllib import urlencode
-from urllib2 import urlopen, HTTPError, URLError
-from common import getjson
+import sys
+from etherdump.commands.common import getjson
+try:
+    # python2
+    from urlparse import urlparse, urlunparse
+    from urllib2 import urlopen, URLError, HTTPError
+    from urllib import urlencode
+    input = raw_input
+except ImportError:
+    # python3
+    from urllib.parse import urlparse, urlunparse, urlencode
+    from urllib.request import urlopen, URLError, HTTPError
 def main (args):
     p = ArgumentParser("call listAllPads and print the results")
@@ -21,12 +29,12 @@ def main (args):
     data['apikey'] = info['apikey']
     requesturl = apiurl+'listAllPads?'+urlencode(data)
     if args.showurl:
-        print requesturl
+        print (requesturl)
     else:
         results = getjson(requesturl)['data']['padIDs']
         if args.format == "json":
-            print json.dumps(results)
+            print (json.dumps(results))
         else:
             for r in results:
-                print r.encode("utf-8")
+                print (r)

etherdump/commands/listauthors.py (9)

@@ -1,5 +1,4 @@
-#!/usr/bin/env python
+from __future__ import print_function
 from argparse import ArgumentParser
 import json
 from urllib import urlencode
@@ -22,11 +21,11 @@ def main(args):
     data['padID'] = args.padid.encode("utf-8")
     requesturl = apiurl+'listAuthorsOfPad?'+urlencode(data)
     if args.showurl:
-        print requesturl
+        print (requesturl)
    else:
         results = json.load(urlopen(requesturl))['data']['authorIDs']
         if args.format == "json":
-            print json.dumps(results)
+            print (json.dumps(results))
         else:
             for r in results:
-                print r.encode("utf-8")
+                print (r.encode("utf-8"))

etherdump/commands/pull.py (47)

@@ -1,13 +1,20 @@
-#!/usr/bin/env python
 from __future__ import print_function
 from argparse import ArgumentParser
 import sys, json, re, os
 from datetime import datetime
-from urllib import urlencode, quote
-from urllib2 import HTTPError
-from common import *
+try:
+    # python2
+    from urllib2 import urlopen, URLError, HTTPError
+    from urllib import urlencode
+except ImportError:
+    # python3
+    from urllib.parse import urlencode, quote
+    from urllib.request import urlopen, URLError, HTTPError
+from etherdump.commands.common import *
 from time import sleep
-from html5tidy import html5tidy
+from etherdump.commands.html5tidy import html5tidy
 import html5lib
 from xml.etree import ElementTree as ET
 # debugging
@@ -49,6 +56,9 @@ def main (args):
     p.add_argument("--output", default=False, action="store_true", help="output changed padids on stdout")
     p.add_argument("--force", default=False, action="store_true", help="reload, even if revisions count matches previous")
     p.add_argument("--no-raw-ext", default=False, action="store_true", help="save plain text as padname with no (additional) extension")
+    p.add_argument("--fix-names", default=False, action="store_true", help="normalize padid's (no spaces, special control chars) for use in file names")
+    p.add_argument("--filter-ext", default=None, help="filter pads by extension")
     p.add_argument("--css", default="/styles.css", help="add css url to output pages, default: /styles.css")
     p.add_argument("--script", default="/versions.js", help="add script url to output pages, default: /versions.js")
@@ -79,7 +89,7 @@ def main (args):
         progressbar(i, numpads, padid)
         data['padID'] = padid.encode("utf-8")
-        p = padpath(padid, args.pub, args.group)
+        p = padpath(padid, args.pub, args.group, args.fix_names)
         if args.folder:
             p = os.path.join(p, padid.encode("utf-8"))
@@ -89,8 +99,8 @@ def main (args):
         skip = False
         padurlbase = re.sub(r"api/1.2.9/$", "p/", info["apiurl"])
         meta = {}
-        if type(padurlbase) == unicode:
-            padurlbase = padurlbase.encode("utf-8")
+        # if type(padurlbase) == unicode:
+        #     padurlbase = padurlbase.encode("utf-8")
         while True:
             try:
                 if os.path.exists(metapath):
@@ -101,10 +111,10 @@ def main (args):
                         skip=True
                         break
-                meta['padid'] = padid.encode("utf-8")
+                meta['padid'] = padid # .encode("utf-8")
                 versions = meta["versions"] = []
                 versions.append({
-                    "url": padurlbase + quote(padid.encode("utf-8")), # this quote was really important for dealing with rogue chars like \xa0 in a padid;
+                    "url": padurlbase + quote(padid),
                     "type": "pad",
                     "code": 200
                 })
@@ -129,13 +139,13 @@ def main (args):
             except HTTPError as e:
                 tries += 1
                 if tries > 3:
-                    print ("Too many failures ({0}), skipping".format(padid).encode("utf-8"), file=sys.stderr)
+                    print ("Too many failures ({0}), skipping".format(padid), file=sys.stderr)
                     skip=True
                     break
                 else:
                     sleep(3)
             except TypeError as e:
-                print ("Type Error loading pad {0} (phantom pad?), skipping".format(padid).encode("utf-8"), file=sys.stderr)
+                print ("Type Error loading pad {0} (phantom pad?), skipping".format(padid), file=sys.stderr)
                 skip=True
                 break
@@ -145,7 +155,7 @@ def main (args):
         count += 1
         if args.output:
-            print (padid.encode("utf-8"))
+            print (padid)
         if args.all or (args.meta or args.text or args.html or args.dhtml):
             try:
@@ -172,7 +182,7 @@ def main (args):
             ver["path"] = p+raw_ext
             ver["url"] = quote(ver["path"])
             with open(ver["path"], "w") as f:
-                f.write(text.encode("utf-8"))
+                f.write(text)
             # once the content is settled, compute a hash
             # and link it in the metadata!
@@ -180,7 +190,7 @@ def main (args):
         if args.css:
             links.append({"href":args.css, "rel":"stylesheet"})
         # todo, make this process reflect which files actually were made
-        versionbaseurl = quote(padid.encode("utf-8"))
+        versionbaseurl = quote(padid)
         links.append({"href":versions[0]["url"], "rel":"alternate", "type":"text/html", "title":"Etherpad"})
         links.append({"href":versionbaseurl+raw_ext, "rel":"alternate", "type":"text/plain", "title":"Plain text"})
         links.append({"href":versionbaseurl+".raw.html", "rel":"alternate", "type":"text/html", "title":"HTML"})
@@ -198,11 +208,12 @@ def main (args):
             html = html['data']['html']
             ver["path"] = p+".diff.html"
             ver["url"] = quote(ver["path"])
-            doc = html5lib.parse(html.encode("utf-8"), treebuilder="etree", encoding="utf-8", namespaceHTMLElements=False)
+            # doc = html5lib.parse(html, treebuilder="etree", override_encoding="utf-8", namespaceHTMLElements=False)
+            doc = html5lib.parse(html, treebuilder="etree", namespaceHTMLElements=False)
             html5tidy(doc, indent=True, title=padid, scripts=args.script, links=links)
             with open(ver["path"], "w") as f:
                 # f.write(html.encode("utf-8"))
-                print(ET.tostring(doc, method="html", encoding="utf-8"), file=f)
+                print(ET.tostring(doc, method="html", encoding="unicode"), file=f)
@@ -218,7 +229,7 @@ def main (args):
             html5tidy(doc, indent=True, title=padid, scripts=args.script, links=links)
             with open(ver["path"], "w") as f:
                 # f.write(html.encode("utf-8"))
-                print (ET.tostring(doc, method="html", encoding="utf-8"), file=f)
+                print (ET.tostring(doc, method="html", encoding="unicode"), file=f)
         # output meta
         if args.all or args.meta:
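
The encoding="utf-8" to encoding="unicode" change in pull.py matters because ElementTree returns bytes when asked for "utf-8" but a str when asked for "unicode", and a file opened with plain open(..., "w") in Python 3 expects str. A small illustration under Python 3 (the tiny HTML string and output filename are just placeholders, not from the commit):

# Sketch: ET.tostring() return types under Python 3.
# encoding="utf-8" yields bytes; encoding="unicode" yields str,
# which is what a text-mode file opened with open(..., "w") expects.
import html5lib
from xml.etree import ElementTree as ET

doc = html5lib.parse("<p>hello</p>", treebuilder="etree", namespaceHTMLElements=False)
as_bytes = ET.tostring(doc, method="html", encoding="utf-8")    # -> bytes
as_text  = ET.tostring(doc, method="html", encoding="unicode")  # -> str

with open("out.html", "w") as f:   # text mode: needs str, not bytes
    print(as_text, file=f)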

etherdump/commands/revisionscount.py (7)

@@ -1,5 +1,4 @@
-#!/usr/bin/env python
+from __future__ import print_function
 from argparse import ArgumentParser
 import json
 from urllib import urlencode
@@ -20,7 +19,7 @@ def main(args):
     data['padID'] = args.padid.encode("utf-8")
     requesturl = apiurl+'getRevisionsCount?'+urlencode(data)
     if args.showurl:
-        print requesturl
+        print (requesturl)
     else:
         results = json.load(urlopen(requesturl))['data']['revisions']
-        print results
+        print (results)

etherdump/commands/sethtml.py (66, new file)

@@ -0,0 +1,66 @@
+from __future__ import print_function
+from argparse import ArgumentParser
+import json, sys
+from urllib import urlencode
+from urllib2 import urlopen, HTTPError, URLError
+import requests
+
+LIMIT_BYTES = 100*1000
+
+def main(args):
+    p = ArgumentParser("calls the setHTML API function for the given padid")
+    p.add_argument("padid", help="the padid")
+    p.add_argument("--html", default=None, help="html, default: read from stdin")
+    p.add_argument("--padinfo", default=".etherdump/settings.json", help="settings, default: .etherdump/settings.json")
+    p.add_argument("--showurl", default=False, action="store_true")
+    # p.add_argument("--format", default="text", help="output format, can be: text, json; default: text")
+    p.add_argument("--create", default=False, action="store_true", help="flag to create pad if necessary")
+    p.add_argument("--limit", default=False, action="store_true", help="limit text to 100k (etherpad limit)")
+    args = p.parse_args(args)
+
+    with open(args.padinfo) as f:
+        info = json.load(f)
+    apiurl = info.get("apiurl")
+    # apiurl = "{0[protocol]}://{0[hostname]}:{0[port]}{0[apiurl]}{0[apiversion]}/".format(info)
+    # data = {}
+    # data['apikey'] = info['apikey']
+    # data['padID'] = args.padid # is utf-8 encoded
+
+    createPad = False
+    if args.create:
+        # check if it's in fact necessary
+        requesturl = apiurl+'getRevisionsCount?'+urlencode({'apikey': info['apikey'], 'padID': args.padid})
+        results = json.load(urlopen(requesturl))
+        print (json.dumps(results, indent=2), file=sys.stderr)
+        if results['code'] != 0:
+            createPad = True
+
+    if args.html:
+        html = args.html
+    else:
+        html = sys.stdin.read()
+
+    params = {}
+    params['apikey'] = info['apikey']
+    params['padID'] = args.padid
+
+    if createPad:
+        requesturl = apiurl+'createPad'
+        if args.showurl:
+            print (requesturl)
+        results = requests.post(requesturl, params=params, data={'text': ''}) # json.load(urlopen(requesturl))
+        results = json.loads(results.text)
+        print (json.dumps(results, indent=2))
+
+    if len(html) > LIMIT_BYTES and args.limit:
+        print ("limiting", len(html), LIMIT_BYTES, file=sys.stderr)
+        html = html[:LIMIT_BYTES]
+
+    requesturl = apiurl+'setHTML'
+    if args.showurl:
+        print (requesturl)
+    # params['html'] = html
+    results = requests.post(requesturl, params={'apikey': info['apikey']}, data={'apikey': info['apikey'], 'padID': args.padid, 'html': html}) # json.load(urlopen(requesturl))
+    results = json.loads(results.text)
+    print (json.dumps(results, indent=2))
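
The new sethtml command sends the HTML in the POST body rather than in the query string, which keeps large pads from hitting URL length limits. A reduced sketch of that call (the API base URL, key and pad name below are placeholders, not values from the commit):

# Sketch of the setHTML call as sethtml.py makes it: apikey, padID and html
# travel in the POST body via requests. All values here are placeholders.
import json
import requests

apiurl = "https://pad.example.org/api/1.2.9/"    # assumption: your etherpad API base URL
payload = {
    "apikey": "CONTENTS-OF-APIKEY.txt",          # assumption: your API key
    "padID": "mypad",                            # hypothetical pad name
    "html": "<p>hello pad</p>",
}
resp = requests.post(apiurl + "setHTML", data=payload)
print(json.dumps(json.loads(resp.text), indent=2))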

etherdump/commands/settext.py (18)

@@ -1,5 +1,4 @@
-#!/usr/bin/env python
+from __future__ import print_function
 from argparse import ArgumentParser
 import json, sys
 from urllib import urlencode
@@ -32,7 +31,7 @@ def main(args):
     if args.create:
         requesturl = apiurl+'getRevisionsCount?'+urlencode(data)
         results = json.load(urlopen(requesturl))
-        # print json.dumps(results, indent=2)
+        # print (json.dumps(results, indent=2))
         if results['code'] != 0:
             createPad = True
@@ -42,7 +41,7 @@ def main(args):
         text = sys.stdin.read()
     if len(text) > LIMIT_BYTES and args.limit:
-        print "limiting", len(text), LIMIT_BYTES
+        print ("limiting", len(text), LIMIT_BYTES)
         text = text[:LIMIT_BYTES]
     data['text'] = text
@@ -53,8 +52,9 @@ def main(args):
     requesturl = apiurl+'setText'
     if args.showurl:
-        print requesturl
-    else:
-        results = requests.post(requesturl, data=data) # json.load(urlopen(requesturl))
-        results = json.loads(results.text)
-        print json.dumps(results, indent=2)
+        print (requesturl)
+    results = requests.post(requesturl, params=data) # json.load(urlopen(requesturl))
+    results = json.loads(results.text)
+    if results['code'] != 0:
+        print (u"setText: ERROR ({0}) on pad {1}: {2}".format(results['code'], args.padid, results['message']).encode("utf-8"))
+    # json.dumps(results, indent=2)

etherdump/commands/showmeta.py (3)

@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 from __future__ import print_function
 from argparse import ArgumentParser
 import json, sys, re
@@ -27,6 +26,6 @@ def main (args):
         meta = json.load(f)
     formatstr = args.format.decode("utf-8")
-    formatstr = re.sub(ur"{(\w+)}", r"{0[\1]}", formatstr)
+    formatstr = re.sub(r"{(\w+)}", r"{0[\1]}", formatstr)
     print (formatstr.format(meta).encode("utf-8"))

etherdump/commands/status.py (1)

@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 from __future__ import print_function
 from argparse import ArgumentParser
 import sys, json, re, os

setup.py (4)

@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 import distutils.command.install_lib
 from distutils.core import setup
 import os
@@ -26,7 +26,7 @@ setup(
     #package_data={'activearchives': find("activearchives", "templates/") + find("activearchives", "data/")},
     package_data={'etherdump': find("etherdump", "data/")},
     scripts=['bin/etherdump'],
-    url='http://activearchives.org/wiki/Etherdump/',
+    url='http://activearchives.org/wiki/Etherdump',
     license='LICENSE.txt',
     description='Etherdump an etherpad publishing & archiving system',
     # long_description=open('README.md').read(),
