From 8df9c56ac1cdca2a0ca66ceb9098ee4bd9efe1ba Mon Sep 17 00:00:00 2001
From: Michael Murtaugh
Date: Fri, 12 Jan 2018 14:42:55 +0100
Subject: [PATCH] python3

---
 README.md                            |  8 ++++
 bin/etherdump                        | 11 ++---
 etherdump/commands/common.py         | 40 ++++++++++++-----
 etherdump/commands/creatediffhtml.py |  9 ++--
 etherdump/commands/deletepad.py      |  9 ++--
 etherdump/commands/dumpcsv.py        |  1 -
 etherdump/commands/gethtml.py        |  9 ++--
 etherdump/commands/gettext.py        | 23 ++++++++----
 etherdump/commands/html5tidy.py      |  1 -
 etherdump/commands/index.py          |  1 -
 etherdump/commands/init.py           | 32 +++++++++-----
 etherdump/commands/join.py           |  5 +--
 etherdump/commands/list.py           | 24 ++++++----
 etherdump/commands/listauthors.py    |  9 ++--
 etherdump/commands/pull.py           | 47 ++++++++++++--------
 etherdump/commands/revisionscount.py |  7 ++-
 etherdump/commands/sethtml.py        | 66 ++++++++++++++++++++++++++++
 etherdump/commands/settext.py        | 18 ++++----
 etherdump/commands/showmeta.py       |  3 +-
 etherdump/commands/status.py         |  1 -
 setup.py                             |  4 +-
 21 files changed, 223 insertions(+), 105 deletions(-)
 create mode 100644 etherdump/commands/sethtml.py

diff --git a/README.md b/README.md
index 297059b..e92f697 100644
--- a/README.md
+++ b/README.md
@@ -66,3 +66,11 @@ Originally designed for use at: [constant](http://etherdump.constantvzw.org/).
 
 -----------------------------------------------
 Preparations for [Machine Research](https://machineresearch.wordpress.com/) [2](http://constantvzw.org/site/Machine-Research,2646.html)
+
+6 Oct 2017
+----------------------
+Feature request from PW: When deleting a previously public document, generate a page / pages with an explanation (along the lines of "This document was previously public but has been marked .... maybe give links to search").
+
+3 Nov 2017
+---------------
+machineresearch seems to be __NOPUBLISH__ but still exists (also in recentchanges)
diff --git a/bin/etherdump b/bin/etherdump
index 04fda7b..9bdefee 100755
--- a/bin/etherdump
+++ b/bin/etherdump
@@ -1,5 +1,6 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 
+from __future__ import print_function
 import sys
 
 usage = """Usage:
@@ -31,13 +32,13 @@ try:
     else:
         args = sys.argv[2:]
 except IndexError:
-    print usage
+    print (usage)
     sys.exit(0)
 
 try:
     # http://stackoverflow.com/questions/301134/dynamic-module-import-in-python
     cmdmod = __import__("etherdump.commands.%s" % cmd, fromlist=["etherdump.commands"])
     cmdmod.main(args)
-except ImportError, e:
-    print "Error performing command '{0}'\n(python said: {1})\n".format(cmd, e)
-    print usage
+except ImportError as e:
+    print ("Error performing command '{0}'\n(python said: {1})\n".format(cmd, e))
+    print (usage)
diff --git a/etherdump/commands/common.py b/etherdump/commands/common.py
index eadd28d..c68bdaf 100644
--- a/etherdump/commands/common.py
+++ b/etherdump/commands/common.py
@@ -1,10 +1,22 @@
 from __future__ import print_function
 import re, os, json, sys
-from urllib import quote_plus, unquote_plus
 from math import ceil, floor
-from urllib2 import urlopen, HTTPError
 from time import sleep
+try:
+    # python2
+    from urlparse import urlparse, urlunparse
+    from urllib2 import urlopen, URLError, HTTPError
+    from urllib import urlencode
+    from urllib import quote_plus, unquote_plus
+    from htmlentitydefs import name2codepoint
+
+    input = raw_input
+except ImportError:
+    # python3
+    from urllib.parse import urlparse, urlunparse, urlencode, quote_plus, unquote_plus
+    from urllib.request import urlopen, URLError, HTTPError
+    from html.entities import name2codepoint
 
 groupnamepat = re.compile(r"^g\.(\w+)\$")
 
 def splitpadname (padid):
@@ -17,15 +29,19 @@ def splitpadname (padid):
 def padurl (padid, ):
     return padid
 
-def padpath (padid, pub_path=u"", group_path=u""):
+def padpath (padid, pub_path=u"", group_path=u"", normalize=False):
     g, p = splitpadname(padid)
-    if type(g) == unicode:
-        g = g.encode("utf-8")
-    if type(p) == unicode:
-        p = p.encode("utf-8")
+    # if type(g) == unicode:
+    #     g = g.encode("utf-8")
+    # if type(p) == unicode:
+    #     p = p.encode("utf-8")
     p = quote_plus(p)
-    # p = p.replace(" ", "_")
-    # p = p.replace("*", "-")
+    if normalize:
+        p = p.replace(" ", "_")
+        p = p.replace("(", "")
+        p = p.replace(")", "")
+        p = p.replace("?", "")
+        p = p.replace("'", "")
     if g:
         return os.path.join(group_path, g, p)
     else:
@@ -49,6 +65,7 @@ def getjson (url, max_retry=3, retry_sleep_time=0.5):
         try:
             f = urlopen(url)
             data = f.read()
+            data = data.decode("utf-8")
             rurl = f.geturl()
             f.close()
             ret.update(json.loads(data))
@@ -77,13 +94,12 @@ def progressbar (i, num, label="", file=sys.stderr):
     bars = int(ceil(p*20))
     bar = ("*"*bars) + ("-"*(20-bars))
     msg = u"\r{0} {1}/{2} {3}... ".format(bar, (i+1), num, label)
-    sys.stderr.write(msg.encode("utf-8"))
+    sys.stderr.write(msg)
     sys.stderr.flush()
 
 # Python developer Fredrik Lundh (author of elementtree, among other things) has such a function on his website, which works with decimal, hex and named entities:
 
-import re, htmlentitydefs
 
 ##
 # Removes HTML or XML character references and entities from a text string.
 #
@@ -104,7 +120,7 @@ def unescape(text):
         else:
             # named entity
             try:
-                text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
+                text = unichr(name2codepoint[text[1:-1]])
             except KeyError:
                 pass
         return text # leave as is
diff --git a/etherdump/commands/creatediffhtml.py b/etherdump/commands/creatediffhtml.py
index 06433e6..ea3f7fa 100644
--- a/etherdump/commands/creatediffhtml.py
+++ b/etherdump/commands/creatediffhtml.py
@@ -1,5 +1,4 @@
-#!/usr/bin/env python
-
+from __future__ import print_function
 from argparse import ArgumentParser
 import json
 from urllib import urlencode
@@ -27,13 +26,13 @@ def main(args):
         data['rev'] = args.rev
     requesturl = apiurl+'createDiffHTML?'+urlencode(data)
     if args.showurl:
-        print requesturl
+        print (requesturl)
     else:
         try:
             results = json.load(urlopen(requesturl))['data']
             if args.format == "json":
-                print json.dumps(results)
+                print (json.dumps(results))
             else:
-                print results['html'].encode("utf-8")
+                print (results['html'].encode("utf-8"))
         except HTTPError as e:
             pass
\ No newline at end of file
diff --git a/etherdump/commands/deletepad.py b/etherdump/commands/deletepad.py
index 6ac878f..7c97584 100644
--- a/etherdump/commands/deletepad.py
+++ b/etherdump/commands/deletepad.py
@@ -1,5 +1,4 @@
-#!/usr/bin/env python
-
+from __future__ import print_function
 from argparse import ArgumentParser
 import json
 from urllib import urlencode
@@ -23,11 +22,11 @@ def main(args):
     data['padID'] = args.padid # is utf-8 encoded
     requesturl = apiurl+'deletePad?'+urlencode(data)
     if args.showurl:
-        print requesturl
+        print (requesturl)
     else:
         results = json.load(urlopen(requesturl))
         if args.format == "json":
-            print json.dumps(results)
+            print (json.dumps(results))
         else:
             if results['data']:
-                print results['data']['text'].encode("utf-8")
+                print (results['data']['text'].encode("utf-8"))
diff --git a/etherdump/commands/dumpcsv.py b/etherdump/commands/dumpcsv.py
index 0c4d761..aa6e971 100644
--- a/etherdump/commands/dumpcsv.py
+++ b/etherdump/commands/dumpcsv.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 from __future__ import print_function
 from argparse import ArgumentParser
 import sys, json, re
diff --git a/etherdump/commands/gethtml.py b/etherdump/commands/gethtml.py
index c19e796..ca0c79a 100644
--- a/etherdump/commands/gethtml.py
+++ b/etherdump/commands/gethtml.py
@@ -1,5 +1,4 @@
-#!/usr/bin/env python
-
+from __future__ import print_function
 from argparse import ArgumentParser
 import json
 from urllib import urlencode
@@ -26,10 +25,10 @@ def main(args):
         data['rev'] = args.rev
     requesturl = apiurl+'getHTML?'+urlencode(data)
     if args.showurl:
-        print requesturl
+        print (requesturl)
     else:
         results = json.load(urlopen(requesturl))['data']
         if args.format == "json":
-            print json.dumps(results)
+            print (json.dumps(results))
         else:
-            print results['html'].encode("utf-8")
+            print (results['html'].encode("utf-8"))
diff --git a/etherdump/commands/gettext.py b/etherdump/commands/gettext.py
index f55d8e8..45285e7 100644
--- a/etherdump/commands/gettext.py
+++ b/etherdump/commands/gettext.py
@@ -1,9 +1,14 @@
-#!/usr/bin/env python
-
+from __future__ import print_function
 from argparse import ArgumentParser
 import json, sys
-from urllib import urlencode
-from urllib2 import urlopen, HTTPError, URLError
+try:
+    # python2
+    from urllib2 import urlopen, URLError, HTTPError
+    from urllib import urlencode
+except ImportError:
+    # python3
+    from urllib.parse import urlencode
+    from urllib.request import urlopen, URLError, HTTPError
 
 
 def main(args):
@@ -26,11 +31,13 @@ def main(args):
         data['rev'] = args.rev
     requesturl = apiurl+'getText?'+urlencode(data)
     if args.showurl:
-        print requesturl
+        print (requesturl)
     else:
-        results = json.load(urlopen(requesturl))
+        resp = urlopen(requesturl).read()
+        resp = resp.decode("utf-8")
+        results = json.loads(resp)
         if args.format == "json":
-            print json.dumps(results)
+            print (json.dumps(results))
         else:
             if results['data']:
-                sys.stdout.write(results['data']['text'].encode("utf-8"))
+                sys.stdout.write(results['data']['text'])
diff --git a/etherdump/commands/html5tidy.py b/etherdump/commands/html5tidy.py
index 2c643a5..fc06d32 100644
--- a/etherdump/commands/html5tidy.py
+++ b/etherdump/commands/html5tidy.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 from __future__ import print_function
 from html5lib import parse
 import os, sys
diff --git a/etherdump/commands/index.py b/etherdump/commands/index.py
index d5905bd..23d09f5 100644
--- a/etherdump/commands/index.py
+++ b/etherdump/commands/index.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 from __future__ import print_function
 from argparse import ArgumentParser
 import sys, json, re, os, urlparse
diff --git a/etherdump/commands/init.py b/etherdump/commands/init.py
index 184d4d2..1f229a5 100644
--- a/etherdump/commands/init.py
+++ b/etherdump/commands/init.py
@@ -1,8 +1,17 @@
 from __future__ import print_function
 from argparse import ArgumentParser
-from urlparse import urlparse, urlunparse
-from urllib2 import urlopen, URLError, HTTPError
-from urllib import urlencode
+
+try:
+    # python2
+    from urlparse import urlparse, urlunparse
+    from urllib2 import urlopen, URLError, HTTPError
+    from urllib import urlencode
+    input = raw_input
+except ImportError:
+    # python3
+    from urllib.parse import urlparse, urlunparse, urlencode
+    from urllib.request import urlopen, URLError, HTTPError
+
 import json, os, sys
 
 def get_api(url, cmd=None, data=None, verbose=False):
@@ -13,7 +22,9 @@ def get_api(url, cmd=None, data=None, verbose=False):
         # data['apikey'] = "7c8faa070c97f83d8f705c935a32d5141f89cbaa2158042fa92e8ddad5dbc5e1"
         if verbose:
             print ("trying", useurl, file=sys.stderr)
-        resp = json.load(urlopen(useurl))
+        resp = urlopen(useurl).read()
+        resp = resp.decode("utf-8")
+        resp = json.loads(resp)
         if "code" in resp and "message" in resp:
             return resp
     except ValueError as e:
@@ -25,10 +36,11 @@ def get_api(url, cmd=None, data=None, verbose=False):
         print (" HTTPError", e, file=sys.stderr)
         if e.code == 401:
             # Unauthorized is how the API responds to an incorrect API key
-            resp = json.load(e)
-            if "code" in resp and "message" in resp:
-                # print ("returning", resp, file=sys.stderr)
-                return resp
+            return {"code": 401, "message": e}
+            # resp = json.load(e)
+            # if "code" in resp and "message" in resp:
+            #     # print ("returning", resp, file=sys.stderr)
+            #     return resp
 
 def tryapiurl (url, verbose=False):
     """
@@ -100,7 +112,7 @@ def main(args):
         if apiurl:
             # print ("Got APIURL: {0}".format(apiurl))
             break
-        apiurl = raw_input("Please type the URL of the etherpad: ").strip()
+        apiurl = input("Please type the URL of the etherpad: ").strip()
     padinfo["apiurl"] = apiurl
     apikey = args.apikey
     while True:
@@ -112,7 +124,7 @@ def main(args):
             else:
                 print ("bad")
         print ("The APIKEY is the contents of the file APIKEY.txt in the etherpad folder", file=sys.stderr)
-        apikey = raw_input("Please paste the APIKEY: ").strip()
+        apikey = input("Please paste the APIKEY: ").strip()
     padinfo["apikey"] = apikey
 
     with open(padinfopath, "w") as f:
diff --git a/etherdump/commands/join.py b/etherdump/commands/join.py
index 5076568..fb6f97c 100644
--- a/etherdump/commands/join.py
+++ b/etherdump/commands/join.py
@@ -1,5 +1,4 @@
-#!/usr/bin/env python
-
+from __future__ import print_function
 from argparse import ArgumentParser
 import json, os, re
 from urllib import urlencode
@@ -41,5 +40,5 @@ def main(args):
         pass
     for i in items:
         newloc = os.path.join(itembase, i)
-        print "'{0}' => '{1}'".format(i, newloc)
+        print ("'{0}' => '{1}'".format(i, newloc))
         os.rename(i, newloc)
diff --git a/etherdump/commands/list.py b/etherdump/commands/list.py
index 30ae6ed..036699a 100644
--- a/etherdump/commands/list.py
+++ b/etherdump/commands/list.py
@@ -1,10 +1,18 @@
-#!/usr/bin/env python
-
+from __future__ import print_function
 from argparse import ArgumentParser
 import json
-from urllib import urlencode
-from urllib2 import urlopen, HTTPError, URLError
-from common import getjson
+import sys
+from etherdump.commands.common import getjson
+try:
+    # python2
+    from urlparse import urlparse, urlunparse
+    from urllib2 import urlopen, URLError, HTTPError
+    from urllib import urlencode
+    input = raw_input
+except ImportError:
+    # python3
+    from urllib.parse import urlparse, urlunparse, urlencode
+    from urllib.request import urlopen, URLError, HTTPError
 
 def main (args):
     p = ArgumentParser("call listAllPads and print the results")
@@ -21,12 +29,12 @@
     data['apikey'] = info['apikey']
     requesturl = apiurl+'listAllPads?'+urlencode(data)
     if args.showurl:
-        print requesturl
+        print (requesturl)
    else:
         results = getjson(requesturl)['data']['padIDs']
         if args.format == "json":
-            print json.dumps(results)
+            print (json.dumps(results))
         else:
             for r in results:
-                print r.encode("utf-8")
+                print (r)
diff --git a/etherdump/commands/listauthors.py b/etherdump/commands/listauthors.py
index 7679f88..c9bbf4a 100644
--- a/etherdump/commands/listauthors.py
+++ b/etherdump/commands/listauthors.py
@@ -1,5 +1,4 @@
-#!/usr/bin/env python
-
+from __future__ import print_function
 from argparse import ArgumentParser
 import json
 from urllib import urlencode
@@ -22,11 +21,11 @@ def main(args):
     data['padID'] = args.padid.encode("utf-8")
     requesturl = apiurl+'listAuthorsOfPad?'+urlencode(data)
     if args.showurl:
-        print requesturl
+        print (requesturl)
     else:
         results = json.load(urlopen(requesturl))['data']['authorIDs']
         if args.format == "json":
-            print json.dumps(results)
+            print (json.dumps(results))
         else:
             for r in results:
-                print r.encode("utf-8")
+                print (r.encode("utf-8"))
diff --git a/etherdump/commands/pull.py b/etherdump/commands/pull.py
index 8a81653..7eae85c 100644
--- a/etherdump/commands/pull.py
+++ b/etherdump/commands/pull.py
@@ -1,13 +1,20 @@
-#!/usr/bin/env python
 from __future__ import print_function
 from argparse import ArgumentParser
 import sys, json, re, os
 from datetime import datetime
-from urllib import urlencode, quote
-from urllib2 import HTTPError
-from common import *
+
+try:
+    # python2
+    from urllib2 import urlopen, URLError, HTTPError
+    from urllib import urlencode
+except ImportError:
+    # python3
+    from urllib.parse import urlencode, quote
+    from urllib.request import urlopen, URLError, HTTPError
+
+from etherdump.commands.common import *
 from time import sleep
-from html5tidy import html5tidy
+from etherdump.commands.html5tidy import html5tidy
 import html5lib
 from xml.etree import ElementTree as ET
 # debugging
@@ -49,6 +56,9 @@ def main (args):
     p.add_argument("--output", default=False, action="store_true", help="output changed padids on stdout")
     p.add_argument("--force", default=False, action="store_true", help="reload, even if revisions count matches previous")
     p.add_argument("--no-raw-ext", default=False, action="store_true", help="save plain text as padname with no (additional) extension")
+    p.add_argument("--fix-names", default=False, action="store_true", help="normalize padid's (no spaces, special control chars) for use in file names")
+
+    p.add_argument("--filter-ext", default=None, help="filter pads by extension")
 
     p.add_argument("--css", default="/styles.css", help="add css url to output pages, default: /styles.css")
     p.add_argument("--script", default="/versions.js", help="add script url to output pages, default: /versions.js")
@@ -79,7 +89,7 @@ def main (args):
             progressbar(i, numpads, padid)
 
         data['padID'] = padid.encode("utf-8")
-        p = padpath(padid, args.pub, args.group)
+        p = padpath(padid, args.pub, args.group, args.fix_names)
         if args.folder:
             p = os.path.join(p, padid.encode("utf-8"))
 
@@ -89,8 +99,8 @@ def main (args):
         skip = False
         padurlbase = re.sub(r"api/1.2.9/$", "p/", info["apiurl"])
         meta = {}
-        if type(padurlbase) == unicode:
-            padurlbase = padurlbase.encode("utf-8")
+        # if type(padurlbase) == unicode:
+        #     padurlbase = padurlbase.encode("utf-8")
         while True:
             try:
                 if os.path.exists(metapath):
@@ -101,10 +111,10 @@ def main (args):
                         skip=True
                         break
 
-                meta['padid'] = padid.encode("utf-8")
+                meta['padid'] = padid # .encode("utf-8")
                 versions = meta["versions"] = []
                 versions.append({
-                    "url": padurlbase + quote(padid.encode("utf-8")), # this quote was really important for dealing with rogue chars like \xa0 in a padid;
+                    "url": padurlbase + quote(padid),
                     "type": "pad",
                     "code": 200
                     })
@@ -129,13 +139,13 @@ def main (args):
             except HTTPError as e:
                 tries += 1
                 if tries > 3:
-                    print ("Too many failures ({0}), skipping".format(padid).encode("utf-8"), file=sys.stderr)
+                    print ("Too many failures ({0}), skipping".format(padid), file=sys.stderr)
                     skip=True
                     break
                 else:
                     sleep(3)
             except TypeError as e:
-                print ("Type Error loading pad {0} (phantom pad?), skipping".format(padid).encode("utf-8"), file=sys.stderr)
+                print ("Type Error loading pad {0} (phantom pad?), skipping".format(padid), file=sys.stderr)
                 skip=True
                 break
 
@@ -145,7 +155,7 @@ def main (args):
         count += 1
 
         if args.output:
-            print (padid.encode("utf-8"))
+            print (padid)
 
         if args.all or (args.meta or args.text or args.html or args.dhtml):
             try:
@@ -172,7 +182,7 @@ def main (args):
                 ver["path"] = p+raw_ext
                 ver["url"] = quote(ver["path"])
                 with open(ver["path"], "w") as f:
-                    f.write(text.encode("utf-8"))
+                    f.write(text)
 
                 # once the content is settled, compute a hash
                 # and link it in the metadata!
@@ -180,7 +190,7 @@ def main (args):
             if args.css:
                 links.append({"href":args.css, "rel":"stylesheet"})
             # todo, make this process reflect which files actually were made
-            versionbaseurl = quote(padid.encode("utf-8"))
+            versionbaseurl = quote(padid)
             links.append({"href":versions[0]["url"], "rel":"alternate", "type":"text/html", "title":"Etherpad"})
             links.append({"href":versionbaseurl+raw_ext, "rel":"alternate", "type":"text/plain", "title":"Plain text"})
             links.append({"href":versionbaseurl+".raw.html", "rel":"alternate", "type":"text/html", "title":"HTML"})
@@ -198,11 +208,12 @@ def main (args):
                     html = html['data']['html']
                     ver["path"] = p+".diff.html"
                     ver["url"] = quote(ver["path"])
-                    doc = html5lib.parse(html.encode("utf-8"), treebuilder="etree", encoding="utf-8", namespaceHTMLElements=False)
+                    # doc = html5lib.parse(html, treebuilder="etree", override_encoding="utf-8", namespaceHTMLElements=False)
+                    doc = html5lib.parse(html, treebuilder="etree", namespaceHTMLElements=False)
                     html5tidy(doc, indent=True, title=padid, scripts=args.script, links=links)
                     with open(ver["path"], "w") as f:
                         # f.write(html.encode("utf-8"))
-                        print(ET.tostring(doc, method="html", encoding="utf-8"), file=f)
+                        print(ET.tostring(doc, method="html", encoding="unicode"), file=f)
 
             # Process text, html, dhtml, all options
             if args.all or args.html:
@@ -218,7 +229,7 @@ def main (args):
                     html5tidy(doc, indent=True, title=padid, scripts=args.script, links=links)
                     with open(ver["path"], "w") as f:
                         # f.write(html.encode("utf-8"))
-                        print (ET.tostring(doc, method="html", encoding="utf-8"), file=f)
+                        print (ET.tostring(doc, method="html", encoding="unicode"), file=f)
 
         # output meta
         if args.all or args.meta:
diff --git a/etherdump/commands/revisionscount.py b/etherdump/commands/revisionscount.py
index d85f5ef..6612894 100644
--- a/etherdump/commands/revisionscount.py
+++ b/etherdump/commands/revisionscount.py
@@ -1,5 +1,4 @@
-#!/usr/bin/env python
-
+from __future__ import print_function
 from argparse import ArgumentParser
 import json
 from urllib import urlencode
@@ -20,7 +19,7 @@ def main(args):
     data['padID'] = args.padid.encode("utf-8")
     requesturl = apiurl+'getRevisionsCount?'+urlencode(data)
     if args.showurl:
-        print requesturl
+        print (requesturl)
     else:
         results = json.load(urlopen(requesturl))['data']['revisions']
-        print results
+        print (results)
diff --git a/etherdump/commands/sethtml.py b/etherdump/commands/sethtml.py
new file mode 100644
index 0000000..7b6a0cf
--- /dev/null
+++ b/etherdump/commands/sethtml.py
@@ -0,0 +1,66 @@
+from __future__ import print_function
+from argparse import ArgumentParser
+import json, sys
+from urllib import urlencode
+from urllib2 import urlopen, HTTPError, URLError
+import requests
+
+
+LIMIT_BYTES = 100*1000
+
+def main(args):
+    p = ArgumentParser("calls the setHTML API function for the given padid")
+    p.add_argument("padid", help="the padid")
+    p.add_argument("--html", default=None, help="html, default: read from stdin")
+    p.add_argument("--padinfo", default=".etherdump/settings.json", help="settings, default: .etherdump/settings.json")
p.add_argument("--showurl", default=False, action="store_true") + # p.add_argument("--format", default="text", help="output format, can be: text, json; default: text") + p.add_argument("--create", default=False, action="store_true", help="flag to create pad if necessary") + p.add_argument("--limit", default=False, action="store_true", help="limit text to 100k (etherpad limit)") + args = p.parse_args(args) + + with open(args.padinfo) as f: + info = json.load(f) + apiurl = info.get("apiurl") + # apiurl = "{0[protocol]}://{0[hostname]}:{0[port]}{0[apiurl]}{0[apiversion]}/".format(info) +# data = {} +# data['apikey'] = info['apikey'] +# data['padID'] = args.padid # is utf-8 encoded + + createPad = False + if args.create: + # check if it's in fact necessary + requesturl = apiurl+'getRevisionsCount?'+urlencode({'apikey': info['apikey'], 'padID': args.padid}) + results = json.load(urlopen(requesturl)) + print (json.dumps(results, indent=2), file=sys.stderr) + if results['code'] != 0: + createPad = True + + if args.html: + html = args.html + else: + html = sys.stdin.read() + + params = {} + params['apikey'] = info['apikey'] + params['padID'] = args.padid + + if createPad: + requesturl = apiurl+'createPad' + if args.showurl: + print (requesturl) + results = requests.post(requesturl, params=params, data={'text': ''}) # json.load(urlopen(requesturl)) + results = json.loads(results.text) + print (json.dumps(results, indent=2)) + + if len(html) > LIMIT_BYTES and args.limit: + print ("limiting", len(text), LIMIT_BYTES, file=sys.stderr) + html = html[:LIMIT_BYTES] + + requesturl = apiurl+'setHTML' + if args.showurl: + print (requesturl) + # params['html'] = html + results = requests.post(requesturl, params={'apikey': info['apikey']}, data={'apikey': info['apikey'], 'padID': args.padid, 'html': html}) # json.load(urlopen(requesturl)) + results = json.loads(results.text) + print (json.dumps(results, indent=2)) diff --git a/etherdump/commands/settext.py b/etherdump/commands/settext.py index 2ffe1f7..9c29729 100644 --- a/etherdump/commands/settext.py +++ b/etherdump/commands/settext.py @@ -1,5 +1,4 @@ -#!/usr/bin/env python - +from __future__ import print_function from argparse import ArgumentParser import json, sys from urllib import urlencode @@ -32,7 +31,7 @@ def main(args): if args.create: requesturl = apiurl+'getRevisionsCount?'+urlencode(data) results = json.load(urlopen(requesturl)) - # print json.dumps(results, indent=2) + # print (json.dumps(results, indent=2)) if results['code'] != 0: createPad = True @@ -42,7 +41,7 @@ def main(args): text = sys.stdin.read() if len(text) > LIMIT_BYTES and args.limit: - print "limiting", len(text), LIMIT_BYTES + print ("limiting", len(text), LIMIT_BYTES) text = text[:LIMIT_BYTES] data['text'] = text @@ -53,8 +52,9 @@ def main(args): requesturl = apiurl+'setText' if args.showurl: - print requesturl - else: - results = requests.post(requesturl, data=data) # json.load(urlopen(requesturl)) - results = json.loads(results.text) - print json.dumps(results, indent=2) + print (requesturl) + results = requests.post(requesturl, params=data) # json.load(urlopen(requesturl)) + results = json.loads(results.text) + if results['code'] != 0: + print (u"setText: ERROR ({0}) on pad {1}: {2}".format(results['code'], args.padid, results['message']).encode("utf-8")) + # json.dumps(results, indent=2) diff --git a/etherdump/commands/showmeta.py b/etherdump/commands/showmeta.py index 97976f1..827f340 100644 --- a/etherdump/commands/showmeta.py +++ b/etherdump/commands/showmeta.py @@ -1,4 
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 from __future__ import print_function
 from argparse import ArgumentParser
 import json, sys, re
@@ -27,6 +26,6 @@ def main (args):
         meta = json.load(f)
 
     formatstr = args.format.decode("utf-8")
-    formatstr = re.sub(ur"{(\w+)}", r"{0[\1]}", formatstr)
+    formatstr = re.sub(r"{(\w+)}", r"{0[\1]}", formatstr)
 
     print (formatstr.format(meta).encode("utf-8"))
diff --git a/etherdump/commands/status.py b/etherdump/commands/status.py
index d95f09c..e2961f0 100644
--- a/etherdump/commands/status.py
+++ b/etherdump/commands/status.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 from __future__ import print_function
 from argparse import ArgumentParser
 import sys, json, re, os
diff --git a/setup.py b/setup.py
index bb3b257..3397661 100644
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 import distutils.command.install_lib
 from distutils.core import setup
 import os
@@ -26,7 +26,7 @@ setup(
     #package_data={'activearchives': find("activearchives", "templates/") + find("activearchives", "data/")},
     package_data={'etherdump': find("etherdump", "data/")},
     scripts=['bin/etherdump'],
-    url='http://activearchives.org/wiki/Etherdump/',
+    url='http://activearchives.org/wiki/Etherdump',
    license='LICENSE.txt',
     description='Etherdump an etherpad publishing & archiving system',
     # long_description=open('README.md').read(),
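
Note on the recurring pattern: the same python2/python3 import shim appears above in common.py, gettext.py, init.py, list.py and pull.py, together with decoding urlopen() responses before JSON parsing. A minimal standalone sketch of that pattern follows; the fetch_json helper and its name are illustrative only (not etherdump's exact code), and it imports URLError/HTTPError from urllib.error, the documented python3 location, whereas the patch pulls them via urllib.request, which re-exports them.

import json

try:
    # python2
    from urllib2 import urlopen, URLError, HTTPError
    from urllib import urlencode, quote_plus, unquote_plus
    input = raw_input
except ImportError:
    # python3
    from urllib.request import urlopen
    from urllib.error import URLError, HTTPError
    from urllib.parse import urlencode, quote_plus, unquote_plus

def fetch_json(url):
    # illustrative helper, not etherdump's code: urlopen() returns bytes on
    # python3, so decode before json.loads to behave the same on both versions
    f = urlopen(url)
    data = f.read().decode("utf-8")
    f.close()
    return json.loads(data)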