#!/usr/bin/env python3
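"""Scrape the #looptober tag on post.lurk.org, download the audio
attachments of this year's posts and rebuild the ezstream playlist."""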
import requests
from time import sleep
import datetime
import os
import shutil
from urllib.parse import urlparse
# Downloaded loops end up here, one subdirectory per post.
output_dir = "loooooops"
def grab_media(path, url):
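    """Download the file at url into path, unless it is already there.

    Returns the filename on success, None otherwise.
    """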
media_item = urlparse(url).path.split('/')[-1]
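
    # Identify the scraper to the remote server and give its admins a
    # point of contact.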
headers = {
'User-Agent': 'https://git.vvvvvvaria.org/rra/radio-looptober',
'From': 'post.lurk.org/@lurk' # This is another valid field
}
if not os.path.exists(os.path.join(path, media_item)):
response = requests.get(url, headers=headers, stream=True)
if response.ok:
with open(os.path.join(path, media_item), 'wb') as media_file:
shutil.copyfileobj(response.raw, media_file)
print('Downloaded media {} from {}'.format(media_item, urlparse(url).netloc))
return media_item


# Page through the entire #looptober tag timeline, collecting each page of
# statuses as JSON in 'data'.
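# post.lurk.org is a Mastodon instance; /api/v1/timelines/tag/<tag> is the
# public tag timeline endpoint of the Mastodon API.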
there_is_more = True
url = "https://post.lurk.org/api/v1/timelines/tag/looptober"
data = []
while there_is_more:
print("downloading", url)
r = requests.get(url)
print("response status: ", r.status_code)
if r.ok:
if r.content:
data.append(r.json())
print("amount of pages:", len(data))
sleep(0.5)
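
        # Mastodon paginates through the HTTP Link header, which requests
        # exposes as r.links; follow the "next" URL until there is none.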
        if "next" in r.links:
            url = r.links["next"]["url"]
print("found next url", url)
else:
print("no more data")
there_is_more = False
break
else:
break
# Parse all the JSON, keeping a few valuable fields per post in 'looooops'.
looooops = []
for collection in data:
for i in collection:
if i["media_attachments"]: #we only take entries that actually contain a sound file
            # 'created_at' is an ISO 8601 UTC timestamp ending in 'Z';
            # fromisoformat() only accepts that suffix from Python 3.11 on,
            # so swap it for an explicit UTC offset.
            creation_date = datetime.datetime.fromisoformat(
                i['created_at'].replace('Z', '+00:00'))
            if creation_date.year == 2022: # we only take entries from this year
stuff = {}
stuff["url"] = i["url"]
stuff["description"] = i["content"]
stuff["audio"] = i["media_attachments"]
stuff["date"] = i["created_at"]
stuff["id"] = i["id"]
stuff["creator"] = i["account"]["username"]
looooops.append(stuff)
print("found post by {} with {} looops".format(
i["account"]["username"],
len(i["media_attachments"])))
if not os.path.exists(output_dir):
os.mkdir(output_dir)
for l in looooops:
path = os.path.join(output_dir,"{}_{}".format(l['creator'], l['id']))
if not os.path.exists(path):
os.mkdir(path)
print("\n")
print("Downloading looops by ***{}***".format(l['creator']))
for a in l['audio']:
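        # Prefer the original file from the poster's own instance; fall back
        # to the copy cached on post.lurk.org when there is no remote_url.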
if a['remote_url']:
url = a['remote_url']
else:
url = a['url']
grab_media(path, url)
# Once we've downloaded everything, generate a playlist and send ezstream a
# SIGHUP so it reloads it.
os.system('find . -iname "*mp3" > playlist_loooooops.m3u '
          '&& kill -s HUP `pidof ezstream`')