import urllib.request
import os
import re
import json
import time
import jinja2  # used by templating code elsewhere in this file

STATIC_FOLDER_PATH = './static'  # without trailing slash
PUBLIC_STATIC_FOLDER_PATH = '/static'  # without trailing slash
TEMPLATES_DIR = None

# This uses a low quality copy of all the images
# (using a folder with the name "images-small",
# which stores a copy of all the images generated with:
# $ mogrify -quality 5% -adaptive-resize 25% -remap pattern:gray50 * )
fast = False


def API_request(url, pagename):
    """
    Perform a MediaWiki API request and return the parsed JSON.

    url = API request url (string)
    pagename = wiki page name (string), used to name the saved JSON dump

    The expected response shape is roughly:
        data = {
            'query': {
                'pages': {
                    pageid: {
                        'links': {'?': '?', 'title': 'pagename'}
                    }
                }
            }
        }

    Side effect: the raw response is saved as
    {STATIC_FOLDER_PATH}/{pagename}.json so the API call can be inspected.
    """
    response = urllib.request.urlopen(url).read()
    data = json.loads(response)

    # Save response as JSON to be able to inspect the API call
    json_file = f'{ STATIC_FOLDER_PATH }/{ pagename }.json'
    print('Saving JSON:', json_file)
    # `with` closes the file itself; the original's extra out.close() was redundant
    with open(json_file, 'w') as out:
        out.write(json.dumps(data, indent=4))

    return data


def download_media(html, images, wiki):
    """
    Download the given wiki images and rewrite their <img src> links.

    html = string (HTML)
    images = list of filenames (str)
    wiki = base wiki URL, without trailing slash (string)

    Returns the HTML with src attributes pointing at the local copies
    under {PUBLIC_STATIC_FOLDER_PATH}/images/.
    """
    images_dir = f'{ STATIC_FOLDER_PATH }/images'
    # check if 'images/' already exists
    if not os.path.exists(images_dir):
        os.makedirs(images_dir)

    # download media files
    for filename in images:
        filename = filename.replace(' ', '_')  # safe filenames

        # check if the image is already downloaded;
        # if not, then download the file
        if not os.path.isfile(f'{ images_dir }/{ filename }'):
            # first we search for the full filename of the image
            url = f'{ wiki }/api.php?action=query&list=allimages&aifrom={ filename }&format=json'
            response = urllib.request.urlopen(url).read()
            data = json.loads(response)

            # we select the first search result
            # (assuming that this is the image we are looking for)
            results = data['query']['allimages']
            if not results:
                # BUGFIX: the original indexed [0] unconditionally and
                # raised IndexError when the API returned no match
                print('No API result for image, skipping:', filename)
                continue
            image = results[0]

            # then we download the image
            image_url = image['url']
            image_filename = image['name']
            print('Downloading:', image_filename)
            image_response = urllib.request.urlopen(image_url).read()

            # and we save it as a file (context manager guarantees the
            # file handle is closed even if the write fails)
            image_path = f'{ images_dir }/{ image_filename }'
            with open(image_path, 'wb') as out:
                out.write(image_response)

            time.sleep(3)  # do not overload the server

        # replace src link
        # NOTE(review): indentation was lost in the extracted source; the
        # rewrite is applied per filename (also for already-downloaded
        # images), which is what a re-run needs — confirm against upstream.
        image_path = f'{ PUBLIC_STATIC_FOLDER_PATH }/images/{ filename }'
        # here the images need to link to the / of the domain, for flask :///
        # confusing! this breaks the whole idea to still be able to make a
        # local copy of the file
        matches = re.findall(rf'src="/book/images/.*?px-{ filename }"', html)  # for debugging
        if matches:
            html = re.sub(
                rf'src="/book/images/.*?px-{ filename }"',
                f'src="{ image_path }"',
                html,
            )
        else:
            matches = re.findall(rf'src="/book/images/.*?{ filename }"', html)  # for debugging
            html = re.sub(
                rf'src="/book/images/.*?{ filename }"',
                f'src="{ image_path }"',
                html,
            )
        # print(f'(unknown): {matches}\n------')  # for debugging: each image should have the correct match!

    return html


def add_item_inventory_links(html):
    """
    Number every textual reference to the item index ("Item NNN").

    html = string (HTML)

    Each occurrence of "Item NNN" gets a per-number running counter,
    collected in `index`, and a derived anchor id `ii-NNN-count`.

    NOTE(review): the HTML markup that originally wrapped these references
    (which presumably used `item_id` as an anchor target) was stripped by
    the source extraction; this reconstruction keeps only what the visible
    code demonstrably does. TODO: recover the original anchor/span markup
    from upstream before relying on the output.
    """
    # Find all references in the text to the item index
    pattern = r'Item \d\d\d'
    matches = re.findall(pattern, html)

    index = {}
    new_html = ''
    from nltk.tokenize import sent_tokenize  # third-party; imported lazily as in the original
    for line in sent_tokenize(html):
        for match in matches:
            if match in line:
                number = match.replace('Item ', '').strip()
                if number not in index:
                    index[number] = []
                    count = 1
                else:
                    count = index[number][-1] + 1
                index[number].append(count)
                # anchor id for this occurrence; the wrapping markup that
                # used it was lost in extraction — TODO restore
                item_id = f'ii-{ number }-{ index[number][-1] }'
                line = line.replace(match, f'Item { number }')
        # the line is pushed back to the new_html
        new_html += line + ' '

    # Also add a wrapper around the index nr to style it.
    # NOTE(review): both the pattern and the replacement below were
    # corrupted in the extracted source (a fragment between the string
    # delimiters is missing); reconstructed from the visible remainder —
    # TODO recover the original pattern and wrapping markup.
    bullet_matches = re.findall(r'• \d\d\d', new_html)
    for match in bullet_matches:
        new_html = new_html.replace(match, f'{ match }')

    # import json
    # print(json.dumps(index, indent=4))

    return new_html


def tweaking(html):
    """
    html = string (HTML)

    NOTE(review): the body of this function was destroyed in the extracted
    source — only `html = html.replace('` survives, so the original
    replacement arguments are unknown. Restored as a pass-through until the
    original replacements can be recovered from upstream — TODO confirm.
    """
    # html = html.replace('...', '...')  # original arguments lost in extraction
    return html


def update_material_now(pagename, wiki):
    """
    pagename = string
    wiki = base wiki URL (string)

    Returns publication_unfolded = string (HTML).
    Delegates to parse_page(), defined elsewhere in this file.
    """
    publication_unfolded = parse_page(pagename, wiki)
    return publication_unfolded


# ---

if __name__ == "__main__":
    wiki = 'https://possiblebodies.constantvzw.org/book'  # remove tail slash '/'
    pagename = 'Unfolded'

    # download the latest version of the page
    publication_unfolded = update_material_now(pagename, wiki)

    # save the page to file (save() is defined elsewhere in this file)
    save(publication_unfolded, pagename)