mirror of
https://git.lurk.org/repos/radio-looptober.git
synced 2024-12-28 06:21:34 +01:00
transcode to opus
Signed-off-by: ugrnm <ultrageranium@bleu255.com>
parent 253c4fbd34
commit 1dfd002e53
README | 1 +

@@ -15,6 +15,7 @@ There are two scripts:
 * download_loooooops.py
   * gets latest toots tagged with #looptober
   * downloads all the attachments if new
+  * transcodes to opus
   * generates new playlist and tells ezstream to reload it
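The tag-fetching step mentioned in the first bullet is not touched by this commit, but for orientation: getting the latest toots for a hashtag normally means paging an instance's public tag timeline. A minimal sketch, assuming post.lurk.org as the instance and the requests library the script already uses; the /api/v1/timelines/tag endpoint and max_id paging are standard Mastodon API, everything else here is illustrative and not code from this repository:

import requests

# stand-alone illustration, not part of download_loooooops.py
url = "https://post.lurk.org/api/v1/timelines/tag/looptober"
params = {"limit": 40}
data = []
while True:
    page = requests.get(url, params=params).json()
    if not page:
        break                                # no more toots, stop paging
    data.extend(page)
    params["max_id"] = page[-1]["id"]        # continue with older toots
print("collected {} toots tagged #looptober".format(len(data)))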
TODO | 2 --

@@ -3,8 +3,6 @@ TODO
 * cache the json parsing/process locally for faster restart tests
 * only download MP3 files
-* extract relevant json stuff to fill in or update ID3 tags
 * 80c :)
-* convert the downloaded mp3 files to opus
 * get rid of requests, just use urllib or something
 * build the loop inside the download script
download_loooooops.py

@@ -11,8 +11,27 @@ from urllib.parse import urlparse

 output_dir = "loooooops"

+bitrate = "128k"
+
+def transcode_media(path, media_item, metadata):
+    infile = os.path.join(path, media_item)
+    outfile = os.path.join(path, media_item + ".opus")
+    if not os.path.exists(outfile):
+        print("transcoding to {}".format(outfile))
+        pid = os.fork()
+        if pid == 0:
+            artist = metadata["creator"]
+            title = metadata["url"]
+            comment = metadata["description"]
+            date = metadata["date"]
+            os.execlp("ffmpeg", "ffmpeg", "-hide_banner", "-loglevel", "error", "-i", infile, "-map_metadata", "-1", "-metadata", "artist={}".format(artist), "-metadata", "title={}".format(title), "-metadata", "creation_time={}".format(date), "-map_chapters", "-1", "-ac", "2", "-af", "loudnorm=dual_mono=true", "-b:a", bitrate, "-y", outfile)
+            # never reached
+        else:
+            os.wait()
+
 def grab_media(path, url):
+    try:
         media_item = urlparse(url).path.split('/')[-1]
         headers = {
@@ -20,13 +39,19 @@ def grab_media(path, url):
             'From': 'post.lurk.org/@lurk' # This is another valid field
         }

-    if not os.path.exists(os.path.join(path, media_item)):
+        if os.path.exists(os.path.join(path, media_item)):
+            return media_item
+        else:
             response = requests.get(url, headers=headers, stream=True)
             if response.ok:
                 with open(os.path.join(path, media_item), 'wb') as media_file:
                     shutil.copyfileobj(response.raw, media_file)
                     print('Downloaded media {} from {}'.format(media_item, urlparse(url).netloc))
                     return media_item
+    except requests.exceptions.ConnectionError as e:
+        # maybe transient network issues
+        print(e)
+        sleep(60)

 #This pages through all the looptober tag and collects the json in 'data'
 there_is_more = True
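The transcode_media function added above runs ffmpeg via fork(), execlp() and wait(): the child process replaces itself with ffmpeg and the parent blocks until it exits. A sketch of what that amounts to using subprocess instead, with the same argument list as the commit; the function name here is illustrative, the commit itself keeps the fork/exec form:

import os
import subprocess

bitrate = "128k"

# illustrative name; the commit uses os.fork()/os.execlp()/os.wait()
def transcode_media_with_subprocess(path, media_item, metadata):
    infile = os.path.join(path, media_item)
    outfile = os.path.join(path, media_item + ".opus")
    if os.path.exists(outfile):
        return
    print("transcoding to {}".format(outfile))
    cmd = ["ffmpeg", "-hide_banner", "-loglevel", "error",
           "-i", infile,
           "-map_metadata", "-1",                 # drop the source file's own tags
           "-metadata", "artist={}".format(metadata["creator"]),
           "-metadata", "title={}".format(metadata["url"]),
           "-metadata", "creation_time={}".format(metadata["date"]),
           "-map_chapters", "-1",
           "-ac", "2",                            # force stereo
           "-af", "loudnorm=dual_mono=true",      # loudness normalisation
           "-b:a", bitrate,
           "-y", outfile]
    # runs ffmpeg and waits for it, like the child exec plus the parent's os.wait()
    subprocess.run(cmd)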
@@ -92,10 +117,12 @@ for l in looooops:
     else:
         url = a['url']
-        grab_media(path, url)
+        media_item = grab_media(path, url)
+        if media_item:
+            transcode_media(path, media_item, l)

 # Once we've done everything we generate a playlist and ask ezstream
 # to reload it
-os.system('find . -iname "*mp3" > playlist_loooooops.m3u'\
+os.system('find . -iname "*opus" > playlist_loooooops.m3u'\
           '&& kill -s HUP `pidof ezstream`')