# encoding=utf8

# --- standard library ---
import collections
import datetime as dt
import json
import os
import random
import re
import secrets
import string
import sys
from datetime import datetime
from itertools import zip_longest
from pprint import pprint

# --- third-party ---
import numpy as np
import pypandoc
import PyPDF2
from flask import send_file, Flask, Response, url_for, render_template, Markup, jsonify, redirect, request, flash, session, make_response
from PIL import Image, ImageDraw, ImageFont
from PyPDF2 import PdfFileMerger, PdfFileReader
from weasyprint import HTML, CSS
from weasyprint.fonts import FontConfiguration

# --- local ---
from config import Config
|
2019-04-17 19:31:33 +02:00
|
|
|
|
|
|
|
|
|
|
|
# Flask application: serve ./static at the web root and load Jinja templates
# from ./templates.
app = Flask(__name__, static_url_path='', static_folder="static", template_folder="templates")

# Enable {% break %} / {% continue %} inside Jinja for-loops.
app.jinja_env.add_extension('jinja2.ext.loopcontrols')

# Pull settings from the Config class defined in config.py.
app.config.from_object(Config)
|
|
|
|
|
2019-05-12 06:11:51 +02:00
|
|
|
######################################################################################

# SETTING THE VARIABLES

######################################################################################

# setting variables for holding paths, folder names and the one file for description
path = "/static/files/"  # web-facing root of the archive folders

jsonfiles = [] #json files

fullpathjsonfiles = [] #fullpath for some situations

listingfiles= [] #fullpaths

listingdirectories = [] #paths

thefile = None #selected file for description

positioninarray = 8 #counter

listofdicts=[] #to be able to import and use json content

datafromjson = []  # file lists loaded from an item's JSON manifest

max_wordpath_items = 500 # limit the nr. of items, as to prevent possible glitches with bots

# NOTE(review): the two lists below are never touched in this file — possibly
# used by code outside this view, or leftovers. Verify before removing.
pathofwords = []

pathofnumbers = []
|
|
|
|
|
2019-05-12 06:11:51 +02:00
|
|
|
#VARS FOR THE SESSIONS

# We don't want to store the secret key in git, but also don't really care what it is
# so generate it and store it to a file that's not in git:
current_dir = os.path.dirname(os.path.realpath(__file__))

secret_key_file = os.path.join(current_dir, 'secret.key')

if os.path.exists(secret_key_file):

    with open(secret_key_file, 'r') as fp:

        secret_key = fp.read()

else:

    # Use secrets.choice (a CSPRNG) rather than random.choice: the key signs
    # the session cookies, so it must not be predictable.
    secret_key = ''.join(secrets.choice(string.ascii_lowercase) for i in range(100))

    with open(secret_key_file, 'w') as fp:

        fp.write(secret_key)
|
|
|
|
# Hand the (possibly freshly generated) key to Flask for cookie signing.
app.secret_key = secret_key

#app.config['SESSION_TYPE'] = 'filesystem' # works only for flask-session (we use native flask cookie-sessions)
|
2019-05-12 06:11:51 +02:00
|
|
|
|
2019-12-03 13:12:54 +01:00
|
|
|
def clearSession():
    """Reset the visitor's tracked navigation state to a fresh start."""
    # Flask serialises the session into a cookie, so plain lists (not deques)
    # are stored here; bounded deques are rebuilt from them per request.
    for key in ('wordpath', 'clicktime', 'id'):
        session[key] = []
    session['veryfirstnow'] = datetime.now().isoformat()
|
|
|
|
|
2019-12-03 11:19:56 +01:00
|
|
|
def setupSession():
    """Initialise the session on first contact.

    Sessions can only be configured inside a request context, so every view
    calls this at the top of the request.
    """
    if 'veryfirstnow' in session:
        return
    clearSession()
|
2019-05-12 06:11:51 +02:00
|
|
|
|
2019-05-20 15:56:54 +02:00
|
|
|
# Mapping of item id -> title, filled while walking the archive below;
# templates use it to build the navbar.
index_dict = dict()
|
|
|
|
|
|
|
|
|
2019-05-12 06:11:51 +02:00
|
|
|
######################################################################################
|
|
|
|
#SOME JSON AND WALK OS REALTED THINGIES
|
|
|
|
######################################################################################
|
|
|
|
|
|
|
|
|
2019-04-17 19:31:33 +02:00
|
|
|
#reading wordlist.json
# Loaded once at import time; wordlist_dict backs the word pages and navbar.
with open('wordlist.json', 'r', encoding='utf8') as f:

    wordlist_dict = json.load(f)
|
|
|
|
|
2019-05-20 15:56:54 +02:00
|
|
|
|
|
|
|
#listing the json paths simultaneously generating a new index.json for the navbar
# NOTE: the walk variable used to be named "path", which silently clobbered the
# module-level `path` constant above — renamed to dirpath to avoid the shadowing.
for dirpath, subdirs, files in os.walk('./static/files/'):

    for name in files:

        if name.endswith(".json"):

            fullpath = os.path.join(dirpath, name)

            # drop the leading "./static" (8 chars) so the stored path is
            # relative to the static root, as the templates expect
            jsonfiles.append(fullpath[8:])

            fullpathjsonfiles.append(fullpath)

            # utf8 for consistency with the wordlist.json read above
            with open(fullpath, encoding='utf8') as f:

                temp_dict = json.load(f)

                index_dict[temp_dict["id"]] = temp_dict["title"]
|
2019-04-17 19:31:33 +02:00
|
|
|
|
|
|
|
|
2019-05-12 06:11:51 +02:00
|
|
|
######################################################################################
|
|
|
|
#NOW THE REAL DEAL
|
|
|
|
######################################################################################
|
2019-04-17 19:31:33 +02:00
|
|
|
|
|
|
|
@app.route("/")
def home():
    """Landing page: the full word list."""
    setupSession()
    context = {'wordlist_dict': wordlist_dict}
    return render_template('home.html', **context)
|
2019-12-03 11:19:56 +01:00
|
|
|
|
2019-05-12 06:11:51 +02:00
|
|
|
def functionsession():
    """Expose the current Flask session object (template helper)."""
    return session
|
|
|
|
|
|
|
|
# Injects the visitor's word trail into every template context.
# The previous version read session['wordpath'] directly, which raises
# KeyError whenever a template renders before setupSession() has populated
# the session (e.g. the very first request) — presumably why it was marked
# "NOT WORKING". Using .get() with a default is safe in both cases.
@app.context_processor
def context_processor():
    functionsession = session.get('wordpath', [])
    return dict(functionsession=functionsession)
|
2019-04-17 19:31:33 +02:00
|
|
|
|
|
|
|
@app.route('/about/')
def about():
    """Static about page."""
    setupSession()
    template = 'about.html'
    return render_template(template)
|
|
|
|
|
2019-12-03 13:12:54 +01:00
|
|
|
@app.route('/clear')
def clear():
    """Wipe the recorded navigation and bounce back to the referring page."""
    # return to a referer if it is set, defaulting to the site root
    destination = request.environ.get("HTTP_REFERER", '/')
    clearSession()
    return redirect(destination)
|
|
|
|
|
2019-04-17 19:31:33 +02:00
|
|
|
@app.route('/description')
def description():
    """Render an archive item's description page.

    Loads the item's JSON manifest (static/files/<id>/<id>.json), records the
    visit id in the session, and inlines the contents of any .html files the
    manifest lists.
    """
    setupSession()

    idno = request.args.get('id')
    # NOTE(review): idno comes straight from the query string and is spliced
    # into a filesystem path below — a crafted value could reach outside
    # static/files/. Consider validating it (e.g. membership in index_dict).
    jsonfilefordescription = "files/"+idno+"/"+idno+".json"

    #open json file, list filepaths in array and loop with thefile
    # (the manifest used to be opened and parsed twice; the first read was
    # fully shadowed by the second, so it has been removed)
    textfile=""
    textfiles=[]
    namefile=[]
    with open("static/"+jsonfilefordescription, 'r') as f:
        data_dict = json.load(f)
    datafromjson = data_dict["files"]
    itemid = data_dict["id"]

    # a glitch of Flask sessions: if you do session['wordpath'].append() it creates an empty list
    # hence we get it, append it, and set it.
    # since Flask's session variables are json serialised (to be stored in a cookie), they do not
    # support collections.deque, therefore we create that on each request to limit the items
    ids = collections.deque(session['id'], maxlen=max_wordpath_items)
    ids.append(itemid)
    session["id"] = list(ids) # ... and therefore, we have to convert it back

    for file in datafromjson:
        if file.lower().endswith(('.html')):
            namefile.append(file)
            with open("static/"+file,"r", encoding='utf-8') as f:
                textfile = f.read()
            # Markup marks the fragment as safe so Jinja renders it as HTML
            textfile = Markup(textfile)
            textfiles.append(textfile)

    return render_template('description.html', datafromjson=datafromjson, itemid=itemid, textfiles=textfiles, idno=idno, index_dict=index_dict, namefile=namefile)
|
2019-04-17 19:31:33 +02:00
|
|
|
|
|
|
|
@app.route('/diverge', methods=['GET'])
def diverge():
    """Record the clicked word and its timestamp, then show the diverge page."""
    setupSession()

    searchterm = request.args.get('search')
    now = datetime.now()  # moment of this click

    # Flask session values must stay JSON-serialisable, so a bounded deque is
    # rebuilt from the stored list on every request, appended to, and written
    # back as a plain list.
    words = collections.deque(session['wordpath'], maxlen=max_wordpath_items)
    words.append(searchterm)
    session['wordpath'] = list(words)

    clicks = collections.deque(session['clicktime'], maxlen=max_wordpath_items)
    clicks.append(now.isoformat())  # fixed format so it can be parsed back later
    session['clicktime'] = list(clicks)

    return render_template('diverge.html', wordlist_dict=wordlist_dict, searchterm=searchterm, index_dict=index_dict)
|
2019-04-17 19:31:33 +02:00
|
|
|
|
2019-05-12 06:11:51 +02:00
|
|
|
######################################################################################
|
|
|
|
#THE SCORE STUFF
|
|
|
|
######################################################################################
|
|
|
|
|
2019-05-12 19:18:58 +02:00
|
|
|
### Add : if score is empty then add some sentence like "your score is empty"
|
|
|
|
### to be printed to the text document
|
|
|
|
|
2019-05-12 06:11:51 +02:00
|
|
|
@app.route("/get-file")
def get_file():
    """Render the visitor's recorded navigation as a typographic "score" PDF.

    Each delay between clicks becomes a run of dots and each visited word is
    upper-cased; the resulting string is wrapped at 60 characters, typeset
    with WeasyPrint (cover page + content page), the two PDFs are merged with
    PyPDF2, the session is reset and the merged file is sent to the browser.
    """
    setupSession()

    fullscore = None
    wordpath = session["wordpath"]        # words clicked so far
    idlist = session["id"]                # ids of visited description pages
    timelist = session["clicktime"]       # ISO timestamps of the clicks
    veryfirstnow = session['veryfirstnow']
    clickongetfiletime = datetime.now()   # moment the score was requested
    tadam = None                          # final, line-wrapped score text
    initialtime = None

    # # USER IP ADDRESS OBTENTION
    if request.environ.get('HTTP_X_FORWARDED_FOR') is None:
        userip = request.environ['REMOTE_ADDR']
    else:
        userip = request.environ['HTTP_X_FORWARDED_FOR'] # if behind a proxy

    # Proxy might add multiple ips, take only one:
    if ',' in userip:
        userip = userip.split(',')[0]

    # # CALCULATION OF INITIAL TIME BEFORE FIRST CLICK
    # (seconds between session start and the first click, rendered as dots)
    if len(timelist) and not (timelist[0] is None):
        thetime = timelist[0]
        thetime = str(thetime)
        print(thetime)
        thetime = dt.datetime.strptime(thetime, "%Y-%m-%dT%H:%M:%S.%f")
        firsttime = dt.datetime.strptime(veryfirstnow, "%Y-%m-%dT%H:%M:%S.%f")
        initialtime = thetime - firsttime
        initialtime = initialtime.total_seconds()
        initialtime = int(initialtime)
        initialtime = "."*initialtime

    print(initialtime)

    # #CALCULATE FILE NUMBER
    # (each generated score lands in scores/, so the count gives the next number)
    dirListing = os.listdir("scores/")
    scorenumber = len(dirListing)

    # CONVERSION OF TIME INTO FORMATS THAT CAN BE USED FOR CALCULATIONS
    timelistforoperations = []
    for t in timelist :
        t = str(t)
        yo = dt.datetime.strptime(t, "%Y-%m-%dT%H:%M:%S.%f")
        timelistforoperations.append(yo)

    prev = None          # timestamp of the previous click in the loop below
    wholestringy = None  # accumulator for the dot/word string

    # print("veryfirstnow : "+str(veryfirstnow)+"\n")

    print(wordpath)
    print(timelistforoperations)
    print(idlist)

    # WEAVING DELAYS AND WORDS TOGETHER AS A HUGE STRING OF CHARACTERS
    for (word, time) in zip(wordpath,timelistforoperations):
        upperword = word.upper()

        #get previous time for substraction
        if not (prev is None):
            difftime = time - prev
            difftime = difftime.total_seconds()
            difftime = int(difftime)
            # print(difftime)
        else:
            # first iteration: measure from the very start of the session
            yo = str(veryfirstnow)
            yoyo = dt.datetime.strptime(yo, '%Y-%m-%dT%H:%M:%S.%f')

            difftime = time - yoyo
            difftime = difftime.total_seconds()
            difftime = int(difftime)

        test = difftime
        prev = time
        diffpattern = test * '.'
        stringy = diffpattern + upperword

        if not (wholestringy is None):
            wholestringy = wholestringy+stringy
            fullscore = wholestringy
        else:
            # NOTE(review): the first word drops its diffpattern and keeps only
            # the word — presumably intended, since the gap before the first
            # click is prepended separately via initialtime below. Confirm.
            wholestringy = upperword
            fullscore = wholestringy

    if not (fullscore is None):
        # outside of the loop calculate seconds from "clickongetfiletime" minus the last "time" from the loop
        lastdifftime = clickongetfiletime - prev
        lastdifftime = lastdifftime.total_seconds()
        lastdifftime = int(lastdifftime)
        lastdifftime = lastdifftime * '.'
        #the 60 seconds thing
        # print(lastdifftime+"\n")
        fullscore = initialtime+fullscore+lastdifftime
        # Defining splitting point
        n = 60
        # Using list comprehension
        out = [(fullscore[i:i+n]) for i in range(0, len(fullscore), n)]
        #joining the strings with linebreaks
        # tadam = '\n'.join(out) +"\n\n\n\nScore number : "+str(scorenumber)+ "\nGenerated at : "+str(clickongetfiletime)+"\nBy author : "+ userip
        tadam = '\n'.join(out)

    # have a message in file if no nav has been recorded so it's less dull than error page
    if tadam is None:
        tadam = "This score is Null"

    # the request timestamp doubles as a unique-enough file name
    uniquename = str(clickongetfiletime)

    # with open('scores/'+uniquename+'.txt', 'a+') as f:
    #     f.write(tadam)

    # print(tadam)

    ###### SCORE FIST PAGE
    globalname = "Pushing scores"
    print(tadam)

    scorefirstpage = '''<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<link rel="stylesheet" type="text/css" href="static/css/stylepandoc.css">
</head>
<body>
<style>
body{background-color:magenta;}
</style>
<div id="coverpage">
<div class="infos scorenumber">SCORE #'''+str(scorenumber)+'''</div>
<div class="infos author">By '''+userip+'''</div>
<!--<div class="infos globalname">Part of the Pushing Scores series</div>-->
<div class="infos publisher">Part of the <i>Pushing Scores</i> series<br>
Published and distributed by <img src="static/logo/logo.png"></div>
<div class="infos time">On '''+str(clickongetfiletime)+'''</div>
</div>
</body>
</html>
'''

    scorecontentpage = '''<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<link rel="stylesheet" type="text/css" href="static/css/stylepandoc.css">
</head>
<body>
<div id="scorecontentpage">
{}
</div>
</body>
</html>
'''.format(tadam)

    ############# GENERATE SCORE PDF
    pathnamefilepdf = "scores/"+uniquename+"test.pdf"
    font_config = FontConfiguration()
    HTML(string=scorecontentpage, base_url='./').write_pdf(pathnamefilepdf, stylesheets=[CSS('static/css/stylepandoc.css', font_config=font_config)], font_config=font_config)

    ############# GENERATE FIRST PAGE PDF
    pathnamefilecoverpdf = "scores/"+uniquename+"testcover.pdf"
    font_config = FontConfiguration()
    HTML(string=scorefirstpage, base_url='./').write_pdf(pathnamefilecoverpdf, stylesheets=[CSS('static/css/stylepandoc.css', font_config=font_config)], font_config=font_config)

    ############# ASSEMBLE PDFS
    # NOTE(review): merger is created but never used — the PdfFileWriter below
    # does the actual assembling.
    merger = PdfFileMerger()
    file1 = open(pathnamefilecoverpdf,"rb")
    file2 = open(pathnamefilepdf,"rb")

    pdf1 = PyPDF2.PdfFileReader(file1)
    pdf2 = PyPDF2.PdfFileReader(file2)

    pdf1_pages = pdf1.getNumPages()
    pdf2_pages = pdf2.getNumPages()

    outputfile = open("scores/"+uniquename+".pdf", "wb")
    writer = PyPDF2.PdfFileWriter()

    # cover first, then the score pages
    for i in range(pdf1_pages):
        writer.addPage(pdf1.getPage(i))

    for j in range(pdf2_pages):
        writer.addPage(pdf2.getPage(j))

    writer.write(outputfile)

    file1.close()
    file2.close()
    outputfile.close()

    # the score has been delivered: start a fresh trail and drop the
    # intermediate single-part PDFs
    clearSession()
    os.remove(pathnamefilecoverpdf)
    os.remove(pathnamefilepdf)
    print("files removed")

    try:
        return send_file("scores/"+uniquename+".pdf", attachment_filename="olala.pdf")
    except Exception as e:
        return str(e)
|
2019-04-17 19:31:33 +02:00
|
|
|
|
|
|
|
|
2019-05-27 17:32:05 +02:00
|
|
|
######################################################################################
|
|
|
|
#INDEX PAGE
|
|
|
|
######################################################################################
|
|
|
|
@app.route("/index")
def index():
    """Full index: every word plus every archive item."""
    return render_template(
        'theindex.html',
        wordlist_dict=wordlist_dict,
        index_dict=index_dict,
    )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2019-04-17 19:31:33 +02:00
|
|
|
# Run the development server when executed directly.
# NOTE(review): debug=True enables the interactive debugger and auto-reload —
# do not use it in production.
if __name__ == '__main__':

    app.run(debug=True)
|