
properly handle subsites urls

rra committed 5 years ago (branch: master, commit 57b7d4d793)
1 changed file: page_metadata.py (10 changed lines)

@@ -102,21 +102,23 @@ def generate_metadata(path, context):
         media_size = 0
         # enumerate all media displayed on the page
         media, soup = get_media(path) #reuse the same soup to limit calculation
         for m in media:
             # filter out SITEURL to prevent trouble
-            file_name = m.replace(context['SITEURL']+'/', '')
-            #print(context['SITEURL'], m)
             # join output path to file, need to strip any leading slash for os.path
             if subsites:
+                file_name = m.replace(context['main_siteurl']+'/', '')
                 m = os.path.join(general_output_path, file_name.strip('/'))
             else:
+                file_name = m.replace(context['SITEURL']+'/', '')
                 m = os.path.join(output_path, file_name.strip('/'))
+            print(m)
             if os.path.exists(m):
+                print(m, 'exists')
                 media_size = media_size + os.path.getsize(m)
         current_file = os.path.join(output_path, output_file)
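
What the change does: pages that belong to a subsite reference their media through the main site's URL, so the filename is now derived by stripping context['main_siteurl'] and resolving against general_output_path, while regular pages keep stripping context['SITEURL'] and resolving against output_path. A minimal standalone sketch of that mapping follows; the helper name and the example values are hypothetical, not taken from the plugin:

import os

def media_url_to_path(m, context, subsites, output_path, general_output_path):
    # Hypothetical helper illustrating the commit's logic; in the plugin
    # this lives inline in generate_metadata().
    if subsites:
        # subsite pages embed media via the main site's URL
        file_name = m.replace(context['main_siteurl'] + '/', '')
        return os.path.join(general_output_path, file_name.strip('/'))
    else:
        file_name = m.replace(context['SITEURL'] + '/', '')
        return os.path.join(output_path, file_name.strip('/'))

# Example with made-up values:
context = {'main_siteurl': 'https://example.org',
           'SITEURL': 'https://example.org/subsite'}
media_url_to_path('https://example.org/images/photo.jpg', context,
                  subsites=True, output_path='output/subsite',
                  general_output_path='output')
# -> 'output/images/photo.jpg', whose size on disk is then added to media_size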
