# Files for the publication & poster for Data Workers, an exhibition by Algolit.
# http://www.algolit.net/index.php/Data_Workers
import random, math
from functions import insert_linebreaks, insert_text_block, convert_to_figlet_font, insert_symbol_background
# Vocabulary for the back covers of the Data Workers publication: for each
# zone of the exhibition, the subjects that act in it, the actions they
# perform (index-aligned with 'subjects'), and the zone's defining verb.
zones = {
    'en': {
        'writers': {
            'subjects': ['data workers', 'many authors', 'every human being who has access to the internet', 'we', 'we', 'we', 'some neural networks', 'human editors', 'poets, playwrights or novelists'],
            'actions': ['work', 'write', 'interacts', 'chat, write, click, like and share', 'leave our data', 'find ourselves writing in Python', 'write', 'assist', 'assist'],
            'verb': 'write'
        },
        'oracles': {
            'subjects': ['machine learning', 'models', 'models', 'they', 'they', 'information extraction', 'text classification'],
            'actions': ['analyses and predicts', 'have learned', 'are used', 'influence', 'have their say', 'recognizes', 'detects'],
            'verb': 'predict'
        },
        'cleaners': {
            'subjects': ['we', 'we', 'human work', 'poorly-paid freelancers', 'volunteers', 'whoever'],
            'actions': ['helped', 'cleaned', 'is needed', 'carry out', 'do fantastic work', 'cleans up text'],
            'verb': 'clean'
        },
        'informants': {
            'subjects': ['each dataset', 'datasets', 'some datasets', 'models that require supervision', 'models', 'some of the datasets', 'humans'],
            'actions': ['collects different information about the world', 'are imbued with collector\'s bias', 'combine machinic logic with human logic', 'multiply the subjectivities', 'propagate what they\'ve been taught', 'pass as default in the machine learning field', 'guide machines'],
            'verb': 'inform'
        },
        'readers': {
            'subjects': ['a computer', 'all models', 'some models', 'some models'],
            'actions': ['understands', 'translate', 'count', 'replace'],
            'verb': 'read'
        },
        'learners': {
            'subjects': ['learners', 'learners', 'learners', 'classifiers', 'learners', 'learners'],
            'actions': ['are pattern finders', 'are crawling through data', 'generate some kind of specific \'grammar\'', 'generate, evaluate and readjust', 'understand and reveal patterns', 'don\'t always distuinguish well which patterns should be repeated'],
            'verb': 'learn'
        }
    }
}

# French zone names mapped to their English counterparts. The French edition
# currently reuses the English vocabulary verbatim (only the zone names are
# translated), so derive the 'fr' entries from 'en' instead of duplicating
# every literal; each entry gains a 'subject' back-pointer to its English
# zone name, which create_zone_backcover() uses to resolve 'fr' lookups.
# NOTE: the derived entries share their list objects with 'en' — fine as
# long as the data stays read-only, as it is in this file.
_FR_ZONE_NAMES = {
    'écrivains': 'writers',
    'oracles': 'oracles',
    'nettoyeurs': 'cleaners',
    'informateurs': 'informants',
    'lecteurs': 'readers',
    'apprenants': 'learners',
}
zones['fr'] = {
    fr_name: dict(zones['en'][en_name], subject=en_name)
    for fr_name, en_name in _FR_ZONE_NAMES.items()
}
def create_zone_backcover(zone, language, symbols):
    """Render one zone's back-cover page as a two-column figlet layout.

    The left column lists the zone's subjects (right-aligned figlet text),
    the right column the matching actions (left-aligned), headed by the
    zone name and its verb. The page is padded to a fixed height and then
    laid over a background made of `symbols`.

    Fixes vs. the previous version: the padding loop no longer emits a
    debug `print` for every padded line/character (which flooded stdout),
    and pads with one string multiplication instead of appending single
    spaces in a nested loop.

    Parameters
    ----------
    zone : str
        Zone name, e.g. 'writers' (English) or 'écrivains' (French).
    language : str
        'en' or 'fr'. The French entries carry a 'subject' pointer to
        their English counterpart; 'fr' is resolved back to 'en' since
        the French edition reuses the English vocabulary.
    symbols : str
        Characters used for the page background; newlines are stripped.

    Returns
    -------
    str
        The rendered page, including the symbol background.
    """
    print('... Creating zone_backcover')
    symbols = symbols.replace('\n', '')
    # Resolve a French zone to its English equivalent and continue in English.
    if language == 'fr':
        zone = zones[language][zone.lower()]['subject']
        language = 'en'
    subjects = zones[language][zone.lower()]['subjects']
    actions = zones[language][zone.lower()]['actions']
    subject_figlets = [convert_to_figlet_font(subject, 18, font='digital', alignment='right') for subject in subjects]
    action_figlets = [convert_to_figlet_font(action, 18, font='digital', alignment='left') for action in actions]
    # Sync the left and right column: measure each entry's height in figlet
    # rows (each 'digital' figlet row appears to span 3 text lines — TODO
    # confirm against convert_to_figlet_font).
    subjects_num_of_lines = [len(subject.split('\n')) // 3 for subject in subject_figlets]
    actions_num_of_lines = [len(action.split('\n')) // 3 for action in action_figlets]
    # Pad each subject with blank rows matching the height of its action
    # (and vice versa) so every subject/action pair stays horizontally
    # aligned. A one-row counterpart is zeroed first so the `- 1` below
    # yields no padding at all for it.
    subjects_string = ''
    for s in range(len(subjects)):
        if actions_num_of_lines[s] == 1:
            actions_num_of_lines[s] = 0
        subjects_string += subject_figlets[s] + ('\n\n\n' * (actions_num_of_lines[s] - 1))
    actions_string = ''
    for a in range(len(actions)):
        if subjects_num_of_lines[a] == 1:
            subjects_num_of_lines[a] = 0
        actions_string += ('\n\n\n' * (subjects_num_of_lines[a] - 1)) + action_figlets[a]
    left = insert_linebreaks(subjects_string, 56, type='wrap')
    right = insert_linebreaks(actions_string, 56, type='wrap')
    two_columns = insert_text_block(left, right, 57, 55)
    # Header: zone name on the left, the zone's verb on the right.
    subject_left = convert_to_figlet_font(zone.lower(), 18, font='digital', alignment='right') + '\n'
    subject_left = insert_linebreaks(subject_left, 56, type='wrap')
    action_right = convert_to_figlet_font(zones[language][zone.lower()]['verb'], 18, font='digital', alignment='left') + '\n'
    action_right = insert_linebreaks(action_right, 56, type='wrap')
    page = insert_text_block(subject_left, action_right, 57, 55)
    page += '\n'
    # Insert the two columns below the header.
    page += two_columns + '\n'
    # Fill the remaining lines of the page with blank 109-character lines
    # (the page body is 110 columns wide, see insert_symbol_background).
    current_page_lines = len(page.split('\n'))
    if current_page_lines < 70:
        page += (' ' * 109 + '\n') * (69 - current_page_lines)
    # Apply symbols background
    multiplier = 500
    page = insert_symbol_background(page, 110, symbols, multiplier)
    print('>>> current_page_lines:', len(page.split('\n')))
    return page
# print(create_zone_backcover('writers'))