
pushing the cross-reader files to the git

english-french
manetta 5 years ago
commit
77931be51e
  1. .gitignore (+3)
  2. README.md (+49)
  3. escpos.py (+51)
  4. index.json (+31848)
  5. manifestos.py (+35)
  6. printer.py (+102)
  7. readings.py (+200)
  8. start.py (+113)
  9. static/css/fonts/LiberationSansNarrow-Bold.ttf (binary)
  10. static/css/fonts/unifont-11.0.03.ttf (binary)
  11. static/css/stylesheet.css (+278)
  12. static/html/1912_The-Manifesto-of-Futurist-Woman_Valentine-de-Saintpoint.html (+20)
  13. static/html/1967_SCUM_Manifesto.html (+29)
  14. static/images/epson.png (binary)
  15. static/js/jquery-3.3.1.min.js (+2)
  16. templates/en/base.html (+74)
  17. templates/en/colophon.html (+150)
  18. templates/en/index.html (+52)
  19. templates/en/manifesto.html (+20)
  20. templates/en/mappings-name.html (+62)
  21. templates/en/mappings.html (+17)
  22. templates/en/ordered.html (+27)
  23. templates/en/results.html (+111)
  24. templates/fr/base.html (+74)
  25. templates/fr/colophon.html (+158)
  26. templates/fr/index.html (+53)
  27. templates/fr/manifesto.html (+20)
  28. templates/fr/mappings-name.html (+62)
  29. templates/fr/mappings.html (+20)
  30. templates/fr/ordered.html (+27)
  31. templates/fr/results.html (+118)
  32. templates/fr/results_lists.html (+27)
  33. tfidf.py (+124)
  34. words.txt (+52)

3
.gitignore

@@ -0,0 +1,3 @@
__pycache__
txt/*

49
README.md

@@ -0,0 +1,49 @@
# cyber/technofeminist cross-readings
<https://cross.virtualprivateserver.space>
This is the code for a cyber/technofeminist cross-reader, developed in the context of the exhibition Computer Grrrls in Paris (March - July 2019). - <https://gaite-lyrique.net/evenement/computer-grrrls>
*A cross-reading allows for a specific type of reading that does not follow a linear order, but follows a self-chosen path. Reading becomes an act of creating relations and threading connections, reading across different domains.*
*This cyber/technofeminist cross-reader does not follow one but two axes, bridging the act of reading a collection of texts, with the act of reading a tool. Reading across a collection of manifestos, while reading the algorithm that is used to do so.*
## Installation
Requirements -- apt:
`$ sudo apt install python3 python3-pip`
Requirements -- pip3:
`$ pip3 install flask nltk`
Requirements -- nltk:
`$ python3`
`>>> import nltk`
`>>> nltk.download('averaged_perceptron_tagger')`
## Run locally
To run the cross-reader locally, the script `start.py` can be used.
`$ python3 start.py`
Now visit in the browser:
`localhost:5000`
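To check that the NLTK resource from the installation step is available, a quick optional test (`nltk.data.find` raises a `LookupError` if the tagger is missing):
`$ python3`
`>>> import nltk`
`>>> nltk.data.find('taggers/averaged_perceptron_tagger')`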
## Copyleft
The cyber/technofeminist cross-reader is a free work; you can copy, distribute, and modify it under the terms of the Free Art License.
<http://artlibre.org/licence/lal/en/>

51
escpos.py

@@ -0,0 +1,51 @@
#!/usr/bin/env python3
# https://reliance-escpos-commands.readthedocs.io/en/latest/layout.html#d57
escpos = {
"init_printer": "\x1B\x40",
"justify_left": "\x1B\x61\x00",
"justify_center": "\x1B\x61\x01",
"justify_right": "\x1B\x61\x02",
'doubleprinting_on': "\x1B\x47\x01",
'doubleprinting_off': "\x1B\x47\x00",
'inverted_on': "\x1D\x42\x01",
'inverted_off': "\x1D\x42\x00",
'large_w': "\x1D\x21\x70",
'large_h': "\x1D\x21\x07",
'margin_left': "\x1D\x4C\x15\x00",
'print_area': "\x1D\x57\x80\x00",
'font_a': "\x1B\x4D\x00",
'font_b': "\x1B\x4D\x01",
'largefont': "\x1B\x21\x70",
'mediumfont': "\x1B\x21\x10",
'normalfont': "\x1B\x21\x00",
'bold': "\x1B\x21\x08",
'reset': "\x1B\x21\x00",
'space_btw_letters_0L': '\x1B\x20\x00', # n [0,255]
'space_btw_letters_5L': '\x1B\x20\x05', # n [0,255]
'space_btw_letters_10L': '\x1B\x20\x10', # n [0,255]
'space_btw_letters_20L': '\x1B\x20\x20', # n [0,255]
'paperfeed_1l': '\x1B\x64\x01' ,
'paperfeed_10l': '\x1B\x64\x10' ,
'papercut':'\x1D\x56\x00',
'direction_0': '\x1B\x56\x00' ,
'direction_90': '\x1B\x56\x01' ,
'reverse_print_on': '\x1D\x42\x01',
'reverse_print_off': '\x1D\x42\x00',
'pagedefault': '\x1B\x53' , #?
}
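For orientation, these ESC/POS control sequences are written straight to the printer device by printer.py below. A minimal usage sketch along the same lines (assuming, as printer.py does, a receipt printer at /dev/usb/lp0):

from escpos import escpos

# open the printer device and send a few control sequences followed by text
printer = open('/dev/usb/lp0', 'w')
print(escpos['init_printer'], file=printer, flush=True)
print(escpos['justify_center'], file=printer, flush=True)
print(escpos['largefont'], 'hello', escpos['reset'], file=printer, flush=True)
print(escpos['paperfeed_1l'], file=printer, flush=True)
print(escpos['papercut'], file=printer, flush=True)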

31848
index.json

File diff suppressed because it is too large

35
manifestos.py

@@ -0,0 +1,35 @@
manifestos = {
'1912_The_Manifesto_of_Futurist_Woman_[EN]' : 'https://www.wired.com/2008/11/the-manifesto-1/',
'1912_Manifeste_de_la_Femme_Futuriste_[FR]' : 'https://vvvvvvaria.org/~mb/techfem/1912_Manifeste_de_la_Femme_Futuriste_%5BFR%5D.html',
'1967_S.C.U.M_manifesto_[EN]' : 'https://web.archive.org/web/20050831003435/http://www.emf.net:80/~estephen/manifesto/aum00110.html',
'1967_S.C.U.M_manifesto_[FR]' : 'https://infokiosques.net/lire.php?id_article=4',
'1984_A_Cyborg_Manifesto_[EN]' : 'https://monoskop.org/images/4/4c/Haraway_Donna_1985_A_Manifesto_for_Cyborgs_Science_Technology_and_Socialist_Feminism_in_the_1980s.pdf',
'1984_Manifeste_Cyborg_[FR]' : 'https://wiki.lereset.org/_media/harawaynb.pdf?page=16',
'1989_RIOT_GRRRL_MANIFESTO_[EN]' : 'http://historyisaweapon.com/defcon1/riotgrrrlmanifesto.html',
'1991_Cyberfeminist_manifesto_for_the_21st_century_[EN]' : 'https://monoskop.org/images/4/44/VNS_Matrix_1.jpg',
'1991_Cyberfeminist_manifesto_for_the_21st_century_[FR]' : 'https://web.archive.org/web/20031212082858/http://cyberfeminisme.org:80/txt/VNS.htm',
'1996_Bitch_Mutant_Manifesto_[EN]' : 'https://www.obn.org/reading_room/manifestos/html/bitch.html',
'1997_Cyberfeminism_is_not_[EN+DE+NL+FR]' : 'https://obn.org/cfundef/100antitheses.html',
'2002_Refugia_[EN]' : 'http://www.refugia.net/domainerrors/DE3j_refugia.pdf',
'2009_Glitch_Manifesto_[EN]' : 'https://beyondresolution.nyc3.digitaloceanspaces.com/_Rosa%20Menkman%20-%20Glitch%20Studies%20Manifesto.pdf',
'2012_Glitch_Feminism_Manifesto_[EN]' : 'https://vvvvvvaria.org/~mb/techfem/Glitch_Feminism_Rhizome.html',
'2014_A_Feminist_Server_Manifesto_[EN]' : 'https://pad.constantvzw.org/p/r.512aebbec357ddfad6e2d8a62b965ed0',
'2013_The_Mundane_Afrofuturist_Manifesto_[EN]' : 'https://vvvvvvaria.org/~mb/techfem/The_Mundane_Afrofuturist_Manifesto_Rhizome.html',
'2013_Wages_for_Facebook_[EN]' : 'http://wagesforfacebook.com/',
'2014_Gynepunk_Manifesto_[EN]' : 'https://hackteria.org/wiki/GynePUNK',
'2014_Gynepunk_Manifesto_[ES]' : 'https://gynepunk.hotglue.me/?intro',
'2014_Gynepunk_Manifesto_[FR]' : 'http://gynepunk.tumblr.com/post/156267922875/gynepunk-manifesto-french',
'2014_tRANShACKfEMINISta_[ES]' : 'https://pechblenda.hotglue.me/?transhackfeminismo',
'2014_tRANShACKfEMINISta_[IT]' : 'https://pechblenda.hotglue.me/?transhackfeminista_it',
'2014_tRANShACKfEMINISt_[EN]' : 'https://pechblenda.hotglue.me/?transhackfeminism_en',
'2015_Manifesto_for_the_Gynecene_[EN]' : 'http://ro.tranzit.org/files/MANIFESTO-for-the-Gynecene.pdf',
'2015_The_3D_Additivist_Manifesto_[EN]' : 'https://additivism.org/manifesto',
'2015_Xenofeminist_manifesto_[EN]' : 'http://www.laboriacuboniks.net/',
'2015_Xenofeminist_manifesto_[FR]' : 'http://www.laboriacuboniks.net/fr/index.html',
'2016_Feminist_Principles_of_the_Internet_[EN]' : 'https://feministinternet.org/sites/default/files/Feminist_principles_of_the_internetv2-0.pdf',
'2018_Hackers_of_Resistance_Manifesto_[EN]' : 'https://player.vimeo.com/video/232424555',
'2018_Purple_Noise_Manifesto_[EN]' : 'https://www.obn.org/purplenoise/wp-content/uploads/2019/02/Manifesto18FEB2019.pdf',
'2019_Cyberwitches_Manifesto_[EN]' : 'http://lucilehaute.fr/cyberwitches-manifesto/cyberwitches-manifesto-en.html',
'2019_Cyberwitches_Manifesto_[FR]' : 'http://lucilehaute.fr/cyberwitches-manifesto/cyberwitches-manifesto.html',
'2019_The_Call_for_Feminist_Data_[EN]' : 'https://vvvvvvaria.org/~mb/techfem/feminist_manifesto_large.pdf'
}

102
printer.py

@@ -0,0 +1,102 @@
#!/usr/bin/env python3
import readings
from pprint import pprint
import re, sys
from escpos import escpos
from sys import stdin, stderr, stdout
from time import sleep
import nltk
from nltk.tokenize import RegexpTokenizer
tokenizer = RegexpTokenizer(r'\w+') # initialize tokenizer
try:
printerpath = '/dev/usb/lp0'
stdout = open(printerpath, 'w')
connected = True
print('*receipt printer connected*')
except OSError:
stdout = sys.__stdout__
connected = False
print('*no receipt printer connected*')
def insert_linebreaks(string, linelength):
words = tokenizer.tokenize(string)
count = 0
tmp = ''
new = ''
for word in words:
length = len(word)
count += len(word)
if word == words[-1]:
tmp += word
new += tmp
elif count <= linelength:
tmp += word + ' '
else:
new += tmp + '\n'
tmp = ''
count = 0
return new
def printNow(query, results):
if connected == True:
print(escpos['reset'], file=stdout, flush=True)
print(escpos['init_printer'], file=stdout, flush=True)
print(escpos['margin_left'], file=stdout, flush=True)
print(escpos['justify_center'], file=stdout, flush=True)
for x, document in results.items():
for n, sentence in enumerate(document['matches']):
sentence = str(sentence) # Turn Flask Markup object back to normal python string
sentence = sentence.replace('\n', ' ').replace('  ', ' ')
pattern = r'[\s\W\_]'+query+r'[\s\W\_\n]|^'+query+'|'+query+'$'
match = re.search(pattern, sentence, flags=re.IGNORECASE)
# print('match:', match)
if match:
match = match.group()
sentence_splitted = re.compile(match).split(sentence) # find the query in the sentence, and split the sentence up
# print('sentence splitted:', sentence_splitted)
for i, part in enumerate(sentence_splitted):
if n == 0: # first sentence of this document
if i == 0: # start of a new sentence
print('', file=stdout, flush=True)
linebreak = ''' **
** /** **
//** /** **
**************
///**//**//**/
** /** //**
// /** //
// '''
print(linebreak, file=stdout, flush=True) # print line break ascii art
print('\n', file=stdout, flush=True)
title = insert_linebreaks(document['name'], 46)
print(escpos['normalfont'], title, file=stdout, flush=True) # print the document name
print('\n', file=stdout, flush=True)
# start of sentence
part = insert_linebreaks(part, 21)
print(escpos['largefont'], part, escpos['bold'], file=stdout, flush=True) # print current part of sentence
if i + 1 != len(sentence_splitted): # if this part is not the last one
print(match, escpos['reset'], file=stdout, flush=True) # print query
# line breaker after each sentence
if i + 1 == len(sentence_splitted):
print('\n | \n', escpos['normalfont'], file=stdout, flush=True)
sleep(3)
print(escpos['paperfeed_1l'], file=stdout, flush=True)
print(escpos['paperfeed_1l'], file=stdout, flush=True)
print(escpos['paperfeed_1l'], file=stdout, flush=True)
sleep(3)
print(escpos['papercut'], file=stdout, flush=True)
print(escpos['reset'], file=stdout, flush=True)
# query = 'noise'
# results, _, _ = readings.request_results(query)
# pprint(results)
# printNow(query, results)

200
readings.py

@@ -0,0 +1,200 @@
import os, json, re
from flask import Markup
import nltk
from nltk.tokenize import RegexpTokenizer
tokenizer = RegexpTokenizer(r'\w+') # initialize tokenizer
import pprint
pp = pprint.PrettyPrinter(indent=4)
import tfidf
# TF-IDF visualisation multiplier
multiplier = 25000
def load_index():
if os.path.isfile('index.json') == False:
tfidf.create_index()
f = open('index.json').read()
index = json.loads(f)
return index
def get_random(x, y):
from random import randint
return randint(x, y)
def generate_random_rgb():
r = get_random(0, 255)
g = get_random(0, 255)
b = get_random(0, 255)
return r, g, b
def request_mappings_all():
index = load_index()
filenames = [manifesto for manifesto, _ in index.items()]
mappings = {}
for manifesto, _ in index.items():
words = []
for sentence in index[manifesto]['sentences']:
for word in tokenizer.tokenize(sentence):
tfidf = index[manifesto]['tfidf'][word] * multiplier
if [tfidf, word] not in words:
words.append([tfidf, word])
words.sort(reverse=True)
mappings[manifesto] = words
# pp.pprint(mappings)
return mappings
def request_mappings(name):
index = load_index()
filenames = [manifesto for manifesto, _ in index.items()]
mappings = {}
for manifesto, _ in index.items():
if manifesto == name:
sentences = []
for sentence in index[manifesto]['sentences']:
words = []
for word in tokenizer.tokenize(sentence):
tfidf = index[manifesto]['tfidf'][word] * multiplier
words.append([word, tfidf])
sentences.append(words)
mappings[manifesto] = sentences
# pp.pprint(mappings)
return mappings, filenames
def insert_query_highlight(query, sentence, r, g, b):
pattern = r'[\s\W\_]'+query+r'[\s\W\_]|^'+query+'|'+query+'$'
match = re.search(pattern, sentence, flags=re.IGNORECASE)
if match:
match = match.group()
sentence = re.sub(pattern, ' <strong class="query" style="color:rgba({r},{g},{b},1); background-image: radial-gradient(ellipse, rgba({r},{g},{b},0.4), rgba({r},{g},{b},0.2), transparent, transparent);">{match}</strong> '.format(match=match, r=r, b=b, g=g), sentence, flags=re.IGNORECASE)
return sentence
def insert_suggestion_links(query, sentence):
# insert further reading links
for suggestion in open('words.txt','r').readlines():
suggestion = suggestion.replace('\n', '').strip()
if suggestion:
if suggestion != query:
pattern = r'[\s\W\_]'+suggestion+r'[\s\W\_]|^'+suggestion+'|'+suggestion+'$'
match = re.search(pattern, sentence, flags=re.IGNORECASE)
if match:
match = match.group()
match = match.replace(suggestion, '<a href="?q={0}">{0}</a>'.format(suggestion))
sentence = re.sub(pattern, '<strong>{}</strong>'.format(match), sentence, flags=re.IGNORECASE)
return sentence
def generate_analytics(query, results, index):
analytics = {}
mappings = request_mappings_all()
for manifesto, items in mappings.items():
if manifesto == results[0]['filename']:
analytics['mappings'] = mappings[manifesto]
# Stemmer (very similar words)
analytics['stemmer'] = []
porter = nltk.PorterStemmer()
basequery = porter.stem(query)
for manifesto, _ in index.items():
words = index[manifesto]['tfidf'].keys()
bases = [[porter.stem(word), word] for word in words]
# print('Stemmer bases', bases)
for base, word in bases:
if base == basequery:
analytics['stemmer'].append(word)
analytics['stemmer'] = set(analytics['stemmer'])
if query in analytics['stemmer']:
analytics['stemmer'].remove(query)
# print('Stemmer:', matches)
print('*analytics information returned*')
# pp.pprint(analytics)
return analytics
def request_results(query):
print('\n*results request started*')
query = query.strip().lower()
print('Query:', query)
index = load_index()
filenames = [document for document, _ in index.items()]
results = {}
# results = {
# 0 : {
# 'name' : 'Feminist manifesto (2000)',
# 'filename' : '2000_Feminist_manifesto',
# 'tfidf' : 0.00041,
# 'matches' : [
# 'This is a first matching sentence.',
# 'This is a second matching sentence.',
# 'This is a third matching sentence.'
# ]
# }
# }
# First, sort the matching manifestos on TF-IDF values
order = []
for manifesto, _ in index.items():
for key in index[manifesto]['tfidf'].keys():
if query == key.lower():
# print('Query match:', query)
match = [index[manifesto]['tfidf'][key], manifesto]
order.append(match)
break
order.sort(reverse=True)
# print('Order:', order)
# Loop through the sorted matches
# and add all the data that is needed
# (sentences, tfidf value, manifesto name)
x = 0
for tfidf, manifesto in order:
print('\n---', manifesto, '---')
results[x] = {}
results[x]['name'] = index[manifesto]['name'] # nicely readable name
results[x]['filename'] = manifesto
results[x]['tfidf'] = tfidf
results[x]['matches'] = []
results[x]['html'] = []
# Generate a random RGB color for this manifesto
r, g, b = generate_random_rgb()
# All sentences from this manifesto
sentences = index[manifesto]['sentences']
# Collect matching sentences only
for sentence in sentences:
for word in tokenizer.tokenize(sentence):
if word.lower() == query:
# Append sentence to final set of matching results
results[x]['matches'].append(sentence)
print('Matching sentence:', sentence.replace('\n', ' '))
# Transform sentence into an HTML element
html = insert_query_highlight(query, sentence, r, g, b)
html = insert_suggestion_links(query, html)
html = Markup(html)
results[x]['html'].append(html)
break # Append sentence only once
x += 1
# pp.pprint(results)
print('\n*results returned*')
# Add analytics
if results.keys():
analytics = generate_analytics(query, results, index)
else:
analytics = False
# pp.pprint(analytics)
return results, filenames, analytics
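For reference, the module can also be exercised on its own, mirroring the commented-out lines at the bottom of printer.py (a sketch; it assumes index.json exists or can be built from the txt/ sources, and that the query matches at least one manifesto):

import readings
from pprint import pprint

# returns the matches per manifesto, the list of filenames, and the analytics block
results, filenames, analytics = readings.request_results('noise')
pprint(results[0]['name'])     # readable name of the best-matching manifesto
pprint(results[0]['matches'])  # its matching sentences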

113
start.py

@@ -0,0 +1,113 @@
#!/usr/bin/env python3
import os, sys
from sys import stdout
import flask
from flask import request, redirect
import tfidf
import readings
import printer
from manifestos import manifestos
# Create the application.
APP = flask.Flask(__name__)
# Jinja filters
def prettyfilename(filename):
return filename.replace('_', ' ').replace('-', ' ').replace('.txt', '')
APP.jinja_env.filters['prettyfilename'] = prettyfilename
# Jinja globals
def get_random(x, y):
from random import randint
return randint(x, y)
APP.jinja_env.globals.update(get_random=get_random)
@APP.route('/', methods=['GET', 'POST'])
def index():
return redirect('/fr/')
@APP.route('/<lang>/', methods=['GET', 'POST'])
def index_lang(lang):
"""
Displays the index page accessible at '/<lang>/'
Which is either the start page (index.html)
or a results page (results.html).
"""
query = None
results = None
query = request.args.get('q', '')
suggestions = open('words.txt', 'r').readlines()
# Check printer argument
if printer.connected == True:
connection = 'connected'
else:
connection = 'disconnected'
if request.args.get('q', ''):
results, filenames, analytics = readings.request_results(query)
# print commands
if request.args.get('print', '') == 'now':
if connection == 'connected':
print('*print!*', file=stdout)
printer.printNow(query, results)
return flask.render_template(lang+'/results.html', query=query, results=results, filenames=filenames, connection=connection, suggestions=suggestions, analytics=analytics, lang=lang)
else:
index = readings.load_index()
filenames = [manifesto for manifesto, _ in index.items()]
suggestions = open('words.txt', 'r').readlines()
mappings = readings.request_mappings_all()
mappings_top = [[tfidf, word] for manifesto, words in mappings.items() for tfidf, word in words]
mappings_top.sort(reverse=True)
return flask.render_template(lang+'/index.html', filenames=filenames, suggestions=suggestions, mappings=mappings_top[:100], lang=lang)
@APP.route('/<lang>/manifesto/<name>', methods=['GET', 'POST'])
def manifesto(lang, name):
"""
Displays the page accessible at '/<lang>/manifesto/<name>'.
Here, an iframe is shown with the manifesto in its own context.
"""
index = readings.load_index()
filenames = sorted([manifesto for manifesto, _ in index.items()])
link = manifestos[name]
return flask.render_template(lang+'/manifesto.html', filenames=filenames, name=name, link=link, lang=lang)
@APP.route('/<lang>/mappings', methods=['GET', 'POST'])
def contrast_mappings(lang):
"""
Displays the page accessible at '/<lang>/mappings'.
A TF-IDF visualisation is displayed,
using the TF-IDF values as font-size.
"""
mappings, filenames = readings.request_mappings_all()
return flask.render_template(lang+'/mappings.html', filenames=filenames, mappings=mappings, lang=lang)
@APP.route('/<lang>/mappings/<name>', methods=['GET', 'POST'])
def contrast_mappings_name(lang, name):
"""
Displays the page accessible at '/<lang>/mappings/<name>'.
A TF-IDF visualisation is displayed from the specific manifesto, using the TF-IDF values as font-size.
"""
mappings, filenames = readings.request_mappings(name)
return flask.render_template(lang+'/mappings-name.html', filenames=filenames, mappings=mappings[name], manifesto=name, lang=lang)
@APP.route('/<lang>/colophon', methods=['GET', 'POST'])
def colophon(lang):
"""
Displays colophon page.
"""
index = readings.load_index()
filenames = sorted([manifesto for manifesto, _ in index.items()])
return flask.render_template(lang+'/colophon.html', filenames=filenames, lang=lang)
if __name__ == '__main__':
if not 'index.json' in os.listdir('.'):
tfidf.create_index()
# APP.debug=True
APP.run()
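A minimal smoke-test sketch for these routes, using Flask's built-in test client (an assumption-laden example: it presumes index.json and words.txt are in place so the page can render):

from start import APP

with APP.test_client() as client:
    response = client.get('/en/')
    print(response.status_code)  # expect 200 once the index has been built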

BIN
static/css/fonts/LiberationSansNarrow-Bold.ttf

Binary file not shown.

BIN
static/css/fonts/unifont-11.0.03.ttf

Binary file not shown.

278
static/css/stylesheet.css

@@ -0,0 +1,278 @@
@font-face{
font-family: 'unifont';
src:url('fonts/unifont-11.0.03.ttf');
}
@font-face{
font-family: 'script';
src:url('fonts/LiberationSansNarrow-Bold.ttf');
}
/* animations */
@keyframes blink {
0% { opacity: 1; }
10% { opacity: 0.2; }
20% { opacity: 1; }
100% {opacity: 1;}
}
/* "if we cant make noise its not our revolution" */
body{
position: relative;
min-width: 900px;
margin:20px;
font-family: unifont, sans-serif;
font-size: 8px;
overflow-x: hidden;
z-index: -1;
}
h1, h2, h3{
font-size: 16px;
font-weight: normal;
margin:1em 0;
}
hr{
border:0;
border-bottom:1px dotted;
clear: both;
margin:1em 0;
}
a, a:active, a:hover{
color:black;
text-decoration: none;
border-bottom:1px dotted;
padding:0;
margin:0;
}
sup, small{
font-size: 100%;
}
p{
margin:0;
}
code{
font-family: 'unifont';
font-size: 16px;
}
blockquote{
margin-left:50px;
}
.blink{
animation: 2s linear 1s infinite blink;
}
#wrapper{
position: absolute;
z-index: -1;
width: calc(100% - 400px);
min-width: 600px;
/*width: calc(100% - 300px);*/
margin:10px 0 20px 0;
font-family: unifont, sans-serif;
font-size: 16px;
line-height: 1.35;
}
#nav-wrapper{
position: relative;
width: 100%;
left:50px;
z-index: 0;
margin:0;
}
#nav{
display: inline-block;
}
#logo a{
font-family: 'script';
font-size: 64px;
font-weight: bold;
line-height: 1;
letter-spacing: -0.035em;
border: 0;
}
#search{
width: 250px;
position: relative;
margin:20px 20px 20px 0;
line-height: 72px;
vertical-align: top;
}
#search input#query{
width: 100%;
height: 40px;
padding:0px 10px;
border:1px solid black;
}
#search #submit{
position: absolute;
width: 26px;
height: 26px;
right: -12px;
top:24px;
border:0;
border-radius: 100%;
background-color:transparent;
text-align: center;
}
#search #submit:hover{
cursor: pointer;
}
#colophon_button{
position: absolute;
top:-1em;
right: -3em;
}
#print img{
position: fixed;
top: 100px;
right: 260px;
width:100px;
height: auto;
}
#print.connected{
display: block;
}
#print.disconnected{
display: none;
}
#content{
width:100%;
margin:10px 0 10px 0;
}
strong, strong.query, strong.word, .result{
font-family: 'script';
font-size: 22px;
line-height: 1.35;
font-weight: bold;
letter-spacing: -0.035em;
}
/* crossing TEXT */
.cross{
position: relative;
max-width: 800px;
margin-top: 1em;
padding-right: 2em;
clear: both;
}
p.tfidf, p.techfem, .cross p{
position: relative;
margin: 0 0 1.2em 0;
}
p.tfidf, p.techfem{
width: calc(50% - 1em);
display: inline-block;
}
p.techfem {
float: left;
margin-right: 2em;
}
p.techfem.sync{
display: block;
clear: both;
}
p.techfem:before{
content:' ◕';
padding-right: 0.5em;
}
p.tfidf{
}
p.tfidf.sync{
display: block;
float: right;
clear: both;
}
p.tfidf:before{
content:'◧';
padding-right: 0.5em;
}
p.note{
width: 100%;
text-align: center;
padding-bottom: 1em;
}
/* make sure that these blocks don't flow into the cross text blocks*/
#results, #notused, #suggestions, #conditionals, #analytics ,.analytics, .mappings, #colophon{
clear: both;
}
.results{
columns:220px auto;
column-gap:10px;
scroll-behavior: smooth;
}
.result{
display: inline-block;
width: 190px;
margin:0;
page-break-inside: avoid;
padding: 0 20px 0 20px;
transition: all 0.2s ease-in-out;
text-align:center;
z-index: 2;
}
.result strong.query{
display: block;
width: 100%;
height: 12em;
padding: 10em 6em 0;
margin:-9.7em 0 -10.5em -6em;
text-align: center;
}
.result .title, .result .ascii{
font-family: 'unifont';
font-size: 16px;
font-weight: normal;
}
.result .title{
padding:10px;
margin:3em 0 10px 0;
}
#txt-list{
position: absolute;
width:170px;
right: 0px;
top:-7px;
margin:20px;
font-size: 16px;
line-height: 1.2;
}
#txt-list ul{
margin:1em 0 0 0;
padding:0;
}
#txt-list ul li{
margin:0;
padding:0 0 10px 0;
text-indent: -40px;
list-style: none;
/*word-break: break-all;*/
}
#txt-list li a.contrast, a.contrast{
border: 0;
}
#colophon{
display: block;
width: 100%;
margin-left: 2em;
}
#manifesto_content_wrapper{
position: absolute;
left:0;
width: 100%;
margin:10px 0 0 10px;
padding:20px 0 40px 40px;
}
#manifesto_content{
display: block;
position: relative;
overflow-y: auto;
}
iframe#manifesto_content {
width: 100%;
height: calc(100vh - 200px);
}

20
static/html/1912_The-Manifesto-of-Futurist-Woman_Valentine-de-Saintpoint.html

@@ -0,0 +1,20 @@
<h1>The Manifesto of Futurist Woman</h1>
<div>Valentine de Saintpoint</div>
<div>1912</div>
<img src="http://www.arengario.it/wp-content/uploads/2013/02/12-0500-saintpont-0712-donn.jpg">
<!-- https://upload.wikimedia.org/wikipedia/commons/1/14/Valentine_de_Saint-Point_1914_%281%29.jpg -->
<iframe src="https://www.wired.com/2008/11/the-manifesto-1/"></iframe>
<!--
<div>Read the manifesto ...
<br><br>
... in <strong>English</strong>: <a href="https://www.wired.com/2008/11/the-manifesto-1/">https://www.wired.com/2008/11/the-manifesto-1/</a><br>
... en <strong>Francais</strong>: ...
</div> -->

29
static/html/1967_SCUM_Manifesto.html

@@ -0,0 +1,29 @@
<h1>SCUM Manifesto</h1>
<div>Valerie Solanas</div>
<div>1967</div>
<iframe src="https://scumwillcorrode.files.wordpress.com/2016/11/scum-1977.pdf"></iframe>
<small>[1977 published version, PDF/EN]</small>
<br><br>
<iframe src="https://web.archive.org/web/20050831003435/http://www.emf.net:80/~estephen/manifesto/aum00110.html"></iframe>
<iframe src="http://1libertaire.free.fr/fem-scum.html"></iframe>
<!--
<div>Read the manifesto ...
<br><br>
... in <strong>English</strong>: <a href="https://web.archive.org/web/20050831003435/http://www.emf.net:80/~estephen/manifesto/aum00110.html">https://web.archive.org/web/20050831003435/http://www.emf.net:80/~estephen/manifesto/aum00110.html</a><br>
... en <strong>Francais</strong>: ... <a href="http://1libertaire.free.fr/fem-scum.html">http://1libertaire.free.fr/fem-scum.html</a>
</div>
-->
<hr>
<div>
Other Links:
<br><br>
A geocities website about Valerie Solanas: <a href="https://web.archive.org/web/20050817015943/http://geocities.com/WestHollywood/Village/6982/solanas.html">https://web.archive.org/web/20050817015943/http://geocities.com/WestHollywood/Village/6982/solanas.html</a>
</div>

BIN
static/images/epson.png

Binary file not shown.

2
static/js/jquery-3.3.1.min.js

File diff suppressed because one or more lines are too long

74
templates/en/base.html

@@ -0,0 +1,74 @@
<!DOCTYPE html>
<html lang='en'>
<head>
<meta charset="utf-8" />
<title>{% block title %}cyber/technofeminist cross-readings{% endblock %}</title>
<link type="text/css" rel="stylesheet" href="{{ url_for('static', filename='css/stylesheet.css')}}" />
<script type="text/javascript" src="{{ url_for('static', filename='js/jquery-3.3.1.min.js')}}"></script>
</head>
<body>
<div id="txt-list">
<p><em>Processing</em> and <em>cross-calculating</em> through the following manifestos:</p>
<ul>
{% for filename in filenames | sort %}
<li>
<a href="/{{ lang }}/manifesto/{{ filename | replace('.txt','') }}">{{ filename | prettyfilename }}</a>
<a class="contrast" href="/{{ lang }}/mappings/{{ filename | replace('.txt','') }}"></a>
</li>
{% endfor %}
</ul>
{% block txtlist %}
{% endblock %}
</div>
<div id="wrapper">
<div id="nav-wrapper">
<div id="colophon_button">
EN / <a href="/fr">FR</a>
&nbsp;
<a href="/{{ lang }}/colophon">colophon</a>
</div>
<div id="logo">
<a href="/{{ lang }}/">cyber/technofeminist <br>cross-readings</a>
</div>
{% block search %}
<div id="search">
<form action="" method="GET">
<input id="query" name="q" value="{{query}}"/>
<input id="submit" type="submit" value="➜"/>
</form>
</div>
{% endblock %}
{% block nav %}
{% endblock %}
</div>
<div id="content">
{% block content %}
{% endblock %}
{% block results %}
{% endblock %}
{% block manifesto %}
{% endblock %}
{% block suggestions %}
{% endblock %}
</div>
</div>
</body>
<script>
// refresh when there is no (mouse or keyboard) activity for 5 minutes
var time = new Date().getTime();
$(document.body).bind("mousemove keypress", function(e) {
time = new Date().getTime();
});
function refresh() {
if(new Date().getTime() - time >= 300000){
console.log('refresh!');
window.location.href = '/fr/';
} else {
setTimeout(refresh, 300000);
}
}
setTimeout(refresh, 300000);
</script>
</html>

150
templates/en/colophon.html

@@ -0,0 +1,150 @@
{% extends "en/base.html" %}
{% block title %}{% endblock %}
{% block search %}
{% endblock %}
{% block content %}
<br>
<br>
<br>
<br>
<br>
<div id ="colophon">
<p>
The <em>cyber/technofeminist cross-reader</em> is developed by <a href="http://manettaberends.nl/">Manetta Berends</a><br />
in the context of the exhibition <em>Computer Grrrls</em> in Paris (March - July 2019).<br>
</p>
<br>
<br>
<p>
Commissioned by:<br />
Inke Arns (Hartware Medien Kunst Verein, Dortmund)<br />
Marie Lechner (La Gaîté Lyrique, Paris)<br />
<br />
Cyber/technofeminist manifesto collection:<br />
Inke Arns<br>
Marie Lechner<br />
</p>
<br>
<p>Translation to French:<br />
Julie Boschat-Thorez</p>
<p><br />
Fonts:<br />
Unifont<br />
Liberation Sans Narrow Bold<br />
<br />
Software:<br />
Flask<br />
NLTK<br />
<br />
Copyleft:<br>
The <em>cyber/technofeminist cross-reader</em> is a free work, <br>
you can copy, distribute, and modify it under the terms <br>
of the <a href="http://artlibre.org/licence/lal/en/">Free Art License</a>.
<br>
<br>
Code:<br>
<a href="https://git.vvvvvvaria.org/mb/cross-reader">https://git.vvvvvvaria.org/mb/cross-reader</a><br />
<br>
This project is made possible with the support of the DICRéAM fund, Paris.
</p>
</div>
<br>
<div class="cross">
<br>
<p class="note">[Note on context]</p>
<p class="techfem">
The collection of cyber/technofeminist manifestos includes the following documents:
<br><br>
<em>Manifeste de la Femme Futuriste</em> [FR]<br>
<em>The Manifesto of Futurist Woman</em> [EN] <br>
written by Valentine de Saintpoint (1912)<br><br>
<em>S.C.U.M manifesto</em> [EN, FR]<br>
written by Valerie Solanas (1967)<br><br>
<em>A Cyborg Manifesto</em> [EN] <br>
<em>Manifeste Cyborg</em> [FR] <br>
written by Donna Haraway (1984)<br><br>
<em>RIOT GRRRL MANIFESTO</em> [EN] <br>
published in Bikini Zine (1989)<br><br>
<em>Cyberfeminist manifesto for the 21st century</em> [EN, FR] <br>
written by VNS Matrix (1991)<br><br>
<em>Bitch Mutant Manifesto</em> [EN] <br>
written by VNS Matrix (1996)<br><br>
<em>Cyberfeminism is not</em> [EN, DE, NL, FR] <br>
written by Old Boys Network (OBN) (1997)<br><br>
<em>Refugia</em> [EN] <br>
written by SubRosa (2002)<br><br>
<em>Glitch Manifesto </em>[EN] <br>
written by Rosa Menkman (2009)<br><br>
<em>Glitch Feminism Manifesto</em> [EN] <br>
written by Legacy Russell (2012)<br><br>
<em>The Mundane Afrofuturist Manifesto</em> [EN] <br>
written by Martine Syms (2013)<br><br>
<em>Wages for Facebook</em> [EN] <br>
written by Laurel Ptak (2013)<br><br>
<em>A Feminist Server Manifesto </em>[EN] <br>
published by Constant (2014)<br><br>
<em>Gynepunk Manifesto</em> [EN, ES, FR] <br>
written by Gynepunk (2014)<br><br>
<em>tRANShACKfEMINISta</em> [EN, ES, IT] <br>
written by Pechblenda Lab (2014)<br><br>
<em>Manifesto for the Gynecene</em> [EN] <br>
written by Alexandra Pirici and Raluca Voinea (2015)<br><br>
<em>The 3D Additivist Manifesto</em> [EN] + other languages available<br>
written by Morehshin Allahyari and Daniel Rourke (2015)<br><br>
<em>Xenofeminist manifesto</em> [EN, FR] + other languages available<br>
written by Laboria Cuboniks (2015)<br><br>
<em>Feminist Principles of the Internet </em>[EN] <br>
collective authorship, organized by Association for Progressive Communications (APC) (2016)<br><br>
<em>Hackers of Resistance Manifesto</em> [EN] <br>
written by HORS (2018)<br><br>
<em>Purple Noise Manifesto</em> [EN] <br>
written by Cornelia Sollfrank (2018)<br><br>
<em>The Call for Feminist Data</em> [EN] <br>
written by Caroline Sinders (2018)<br><br>
<em>Cyberwitches Manifesto </em>[EN, FR] <br>
written by Lucile Haute (2019)<br>
<br>
<br>
</p>
<p class="tfidf">The algorithm introduces the idea of a <em>context specific way</em> of counting words.
<br />
<br />
Karen's IDF part of the TF-IDF algorithm creates an ecosystem where the resulting numbers heavily depend on the presence of the other words. The deletion or addition of a document would change all the interrelations in the dataset, as the calculations fully depend on each other. Although the practice of algorithmic text processing is inherently pretty brutal, as language is regarded as nothing but a <em>bag-of-words</em>, the TF-IDF algorithm and its algorithmic character give us a way of counting that creates situated datasets where values are determined by their self-created context.
</p>
<hr>
<div>
User Notice for Copyrighted Materials in this collection
<br><br>
Some of the manifestos in this collection are protected by copyright law, where the copyright is owned by third parties. Fair use permits only certain limited uses of the content. The author of this project is using the third-party content under a fair use doctrine, making a navigational cross-reading tool available to you. The third-party content is used to create an access-point, to read, explore and study them.
</div>
<br>
<br>
</div>
</div>
{% endblock %}

52
templates/en/index.html

@@ -0,0 +1,52 @@
{% extends "en/base.html" %}
{% block results %}
<div id="intro" class="cross">
<!-- <p>Psst, this is a cross-reading tool that operates on two axes ...</p> -->
<!-- <p>Tip: Try to search for a single word.</p> -->
<p>A cross-reading allows for a specific type of reading that does not follow a linear order, but follows a self-chosen path. Reading becomes an act of creating relations and threading connections, reading across different domains. </p>
<p>This cyber/technofeminist cross-reader does not follow one but two axes, bridging the act of reading a collection of texts, with the act of reading a tool. Reading across a collection of manifestos, while reading the algorithm that is used to do so.</p>
<p>These cross-readings connect ...</p>
<p class="tfidf">... the <em>Term Frequency Inverse Document Frequency</em> algorithm, or <em>TF-IDF</em> in short</p>
<p class="techfem">... a collection of <em>cyber- and technofeminist manifestos</em></p>
<p class="tfidf">The TF-IDF is a commonly used algorithm to find the most important words of a document. The algorithm is (partly) written by the female computer scientist Karen Spärck Jones in the 1970s and has become one of the important algorithms of many search tools online, such as digital library systems or corporate search engines like Yandex or Google. The algorithm turns written documents into a sorted lists of search results, using a specific relative and inversed way of counting, that is sensitive for contrast in written documents. </p>
<p class="techfem">The cyber/technofeminist manifestos connect feminist thinking to technology, introducing feminist servers, cyborg figures, cyberwitches, or pleas for the glitch as cultural digital artefact. This collection, which is obviously incomplete, brings a diverse set of technofeminist documents together that are published between 1912 and 2019. The manifestos speak about very different concerns and questions, but they connect in terms of energy level. Urging to make a statement, ready to activate.
<br><br>
An interesting note to mention: Karen Spärck Jones was an advocate for the position of women in computing. <em>“I’ve been trying to think a little bit—but it’s very dispiriting!—about how to try to get more women into computer science. On the whole, everybody who thinks about this is depressed, because we’re going backwards rather than forwards.”</em> <sup><a href="https://ethw.org/Oral-History:Karen_Sp%C3%A4rck_Jones#On_Getting_More_Women_into_Computer_Science"></a></sup></p>
<p>These two axes, the algorithm and the manifestos, interoperate. They support and strengthen each other as the X and Y of this cross-reading tool. </p>
<p>The TF-IDF algorithm, while responding to a search request, creates cross-readings through the manifestos. It outputs a list of search results around the subject of search, creating a field of statements, questions and concerns around one single word. Meanwhile, the algorithm starts to interoperate with the manifesto as a format, sensitive as it is to bullet-pointed writing, repetition and unique words -- elements that are used a lot in these statement-driven documents. The algorithm prioritizes more contrastful language over academic writing, repetition over very diverse vocabularies, and the use of unique words over the use of common ones.</p>
<p>See this cross-reading tool as an exercise in reading, across a field of technofeminist thinking and a tool for algorithmic sorting.</p>
<!-- <p>Reading the TF-IDF algorithm by itself only results in a technical understanding of this old artefact.</p> -->
<!-- <p>This algorithm thereby connects a discrete procedure (that of computing) to a very vivid and strong document format: the manifesto.</p> -->
</div>
{% endblock %}
{% block suggestions %}
<br>
<h1>Cross-reading suggestions (selected):</h1>
<div>
{% for word in suggestions %}
<strong class="query"><a href="/{{ lang}}/?q={{word}}">{{ word.strip() }}</a></strong>
{% endfor %}
</div>
<br>
<h1>Contrast-mapping suggestions (top 100 TF-IDF results):</h1>
<div>
{% for tfidf, word in mappings %}
<strong class="query" style="font-size:{{ 50 + tfidf / 5 }}%;"> <a href="/{{ lang }}/?q={{ word }}">{{ word }}</a> </strong>
{% endfor %}
</div>
<br>
<p>Read more about the <a href="/{{ lang}}/mappings/{{ filenames[0] }}">TF-IDF algorithm and contrast mappings</a>.</p>
<br>
<br>
{% endblock %}
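To make the font sizes in the two suggestion blocks above concrete: request_mappings_all() in readings.py multiplies each raw TF-IDF value by 25000 before it reaches the template, and this page then sets font-size: 50 + tfidf / 5 percent per word. A worked example with a hypothetical raw value:

# hypothetical raw TF-IDF value of one word in one manifesto
raw = 0.002
scaled = raw * 25000          # multiplier in readings.py -> 50.0
font_size = 50 + scaled / 5   # formula in this template -> 60.0 (percent)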

20
templates/en/manifesto.html

@@ -0,0 +1,20 @@
{% extends "en/base.html" %}
{% block title %}{% endblock %}
{% block search %}
{% endblock %}
{% block content %}
{% endblock %}
{% block manifesto %}
<div id="manifesto_content_wrapper">
{% if '.jpg' in link %}
<img src="{{ link }}" />
{% elif 'vimeo' in link %}
<iframe src="{{ link }}" width="600" height="360" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen></iframe>
{% else %}
<iframe id="manifesto_content" src="{{ link }}"></iframe>
{% endif%}
</div>
{% endblock %}

62
templates/en/mappings-name.html

@@ -0,0 +1,62 @@
{% extends "en/base.html" %}
{% block title %}{% endblock %}
{% block search %}
{% endblock %}
{% block results %}
<div class="cross">
<p class="tfidf" style="margin-left: calc(50% + 1.5em);">
<code>
def tfidf(query, words, corpus):<br /><br>
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;# Term Frequency<br />
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;tf_count = 0<br />
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;for word in words:<br />
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;if query == word:<br />
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;tf_count += 1<br />
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;tf = tf_count/len(words)<br />
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<br />
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;# Inverse Document Frequency<br />
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;idf_count = 0<br />
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;for words in corpus:<br />
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;if query in words:<br />
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;idf_count += 1<br />
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<br />
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;tfidf_value = tf * idf<br />
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<br />
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;return tf_count, idf_count, tfidf_value
</code>
</p>
<br><br>
<p class="note">[Note on contrast mappings]</p>
<p class="tfidf" style="float: right;margin-left:1em;">
The TF-IDF algorithm, shown above in the programming language Python, weaves a layer of contrast into the text. Not literally, but in the form of numbers. The most contrastful words are those that the algorithm considers the most important words for that text.
<br><br>
These contrast mappings allow for reading across the manifesto and the algorithm.
<br><br>
The TF-IDF values are calculated in two steps. The algorithm first counts the <em>Term Frequency</em> (TF): the number of appearances of a word in the text, relative to the total number of words in the document. This relative frequency counting makes it possible to compare word counts between documents of varying lengths, for example Donna Haraway's long essay <em>A Cyborg Manifesto</em> (1984) with the relatively short text of <em>The Call for Feminist Data</em> written by Caroline Sinders (2018).
<br><br>
In the second step, the algorithm counts relative to all the other documents in the same dataset, using the <em>Inverse Document Frequency</em> (IDF). This part of the algorithm, which is Karen Spärck Jones’ addition, introduces a subtle form of inverse relative counting throughout all the documents in the dataset. Instead of just counting word frequency in one document, Karen proposed to count in a relative, inter-document way.
<br><br>
This means that when a word only appears in one or a few documents, its value is greatly enlarged. The consequence is that words such as <em>the</em> or <em>it</em> are given a very low number, as they appear in all the documents. And specific words, such as <em>paranodal</em> in <em>A Feminist Server Manifesto</em>, get a very high value, as this word is only used 4 times in the whole dataset and all of those 4 occurrences were in this manifesto.
<br><br>
Another example is <em>SCUM</em>. Although the word <em>SCUM</em> is not the most commonly used word in the <em>S.C.U.M. Manifesto</em>, it is the word that gets the highest score: relative to all the other manifestos, <em>SCUM</em> is used mostly in this manifesto. This increases the score a lot.
</p>
</div>
<div id="mappings">
<h1>{{ manifesto | prettyfilename }}</h1>
{% for sentence in mappings %}
<p class="sentence">
{% for word, tfidf in sentence %}
<strong class="query" style="font-size:{{ 50 + tfidf }}%;"> <a href="/{{ lang }}/?q={{ word }}">{{ word }}</a> </strong>
{% endfor %}
</p>
{% endfor %}
</div>
{% endblock %}
{% block suggestions %}
{% endblock %}
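The note above walks through the two steps in words, and the snippet rendered at the top of this template leaves out the line that derives idf from idf_count. For reference, a small self-contained sketch of the classic computation; it uses a plain count ratio for the IDF (tfidf.py itself is not shown in this commit excerpt, so its exact formula may differ):

from nltk.tokenize import RegexpTokenizer

tokenizer = RegexpTokenizer(r'\w+')

# a toy corpus of three tiny 'manifestos', already tokenized
corpus = [
    tokenizer.tokenize('scum will rule scum will correct'),
    tokenizer.tokenize('the server is a feminist server'),
    tokenizer.tokenize('the glitch is a correction'),
]

def tfidf(query, words, corpus):
    # Term Frequency: occurrences of the query relative to the document length
    tf = words.count(query) / len(words)
    # Inverse Document Frequency: in how many documents does the query appear at all
    document_frequency = sum(1 for document in corpus if query in document)
    idf = len(corpus) / document_frequency
    return tf * idf

# 'scum' appears twice in a six-word document and in only one of the three documents
print(tfidf('scum', corpus[0], corpus))  # (2/6) * (3/1) = 1.0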

17
templates/en/mappings.html

@@ -0,0 +1,17 @@
{% extends "en/base.html" %}
{% block title %}{% endblock %}
{% block txtlist %}
{% endblock %}
{% block results %}
<h1>Contrast mappings, cross-reading with algorithmic results.</h1>
<div id="mappings">
{% for manifesto, items in mappings.items() %}
<h1>{{ manifesto }}</h1>
{% for tfidf, word in items %}
<strong class="query" style="font-size:{{ 50 + tfidf }}%;"> <a href="/{{ lang }}/?q={{ word }}">{{ word }}</a> </strong>
{% endfor %}
{% endfor %}
</div>
{% endblock %}

27
templates/en/ordered.html

@@ -0,0 +1,27 @@
{% extends "en/base.html" %}
{% block title %}{% endblock %}
{% block txtlist %}
{% endblock %}
{% block results %}
<h1>lists</h1>
<div id="listing_per_manifesto" class="listings">
{% for manifesto, words in results.items()|sort %}
<span class="result">
<div class="ascii">
/\|\/\<br>
_)&nbsp;&nbsp;&nbsp;&nbsp;(__<br>
\_&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;_/ <br>
)&nbsp;&nbsp;&nbsp;&nbsp;\ <br>
\/\|\/
</div>
<h2>{{manifesto}}</h2>
{% for value, word in words %}
<strong style="font-size:{{value * 50000}}%;"><a href="{{ url_for('index')}}?q={{word}}">{{word}}</a></strong>
{% endfor %}
</span>
{% endfor %}
<hr>
</div>
{% endblock %}

111
templates/en/results.html

@@ -0,0 +1,111 @@
{% extends "en/base.html" %}
{% block title %}{{query}}{% endblock %}
{% block nav %}
<div id="print" class="blink {{ connection }}">
<a href="?q={{ query }}&print=now"><img src="/static/images/epson.png"></a>
</div>
{% endblock %}
{% block results %}
{% if results == {} %}
<div id="notused">This word could not be found.</div>
{% else %}
<div class="cross">
<p class="note">[Note on the search results]</p>
<p class="tfidf">Once a search query is submitted, the <em>TF-IDF</em> starts to go through all the manifestos in the dataset, to see if a word is used, and if yes, to put it in the list of search results. The list is sorted according to the importance of the word <strong>{{ query }} </strong> to each manifesto. </p>
<p class="techfem">The search results are snippets, statements, quotes or questions, threaded together by one single word, create a cross-reading index. The occurances of <strong> {{ query }}</strong> in the different manifestos, become eachother complexifiers. </p>
<br>
<br>
</div>
<div id="results">
<h1>Cross-reading through the manifestos along the axes of <strong class="query">{{ query }}</strong>:</h1>
<div class="results">
{% for x, manifesto in results.items() %}
{% for sentence in manifesto.html %}
<div id="{{ x }}_{{ loop.index }}" class="result">
<div class="title">
{{ manifesto.name }}
</div>
<div class="sentence">{{ sentence }}</div>
</div>
{% endfor %}
{% endfor %}
</div>
</div>
<br>
<br>
<br>
<div class="cross">
<p class="note">[Note on activating]</p>
<p class="tfidf">
The <em>TF-IDF algorithm</em> is an activator. Activating non-linear threads through a dataset of words.
<br><br>
As a navigator the algorithm is a provider of order, deciding on the importance of the search term for a manifesto.
</p>
<p class="techfem">
The <em>cyber- and technofeminist manifestos</em> are written and published to activate: declaring intentions, motives or specific views on technology. Their writing styles, different as they are, are often statement-based, short and sometimes militant.
<br><br>
<em>&quot;Nothing should be accepted as fixed, permanent, or 'given' -- neither material conditions nor social forms..&quot;</em> (Xenofeminist manifesto), <br>
<em>&quot;So that is why no revolution should be without her.&quot;</em> (The Manifesto of Futurist Woman)
</p>
</div>
{% endif %}
<br>
<br>
<br>
<br>
<br>
<br>
<br>
{% endblock %}
{% block suggestions %}
<div id="suggestions">
<h1>SUGGESTIONS</h1>
<p class="cross">The cross-reader took the freedom to suggest. Some of its suggestions are pre-selected, such as the list of suggested cross-readings. Other suggestions are generated, using language as a playful tool to trigger connections and explore the dimensionality of a single word.</p>
<br>
{% if analytics %}
{% if analytics.stemmer %}
<div id="similars" class="analytics stemmer">
<h2>Similar to but not the same as <em>{{ query }}</em>:</h2>
{% for word in analytics.stemmer %}
<strong class="word"><a href="/{{ lang}}/?q={{ word }}">{{ word }}</a></strong>
{% endfor%}
</div>
<br>
{% endif%}
{% endif%}
<div class="suggestions techfem">
<h1>Cross-reading suggestions (selected):</h1>
{% for word in suggestions %}
<strong class="query"><a href="?q={{word}}">{{ word.strip() }}</a></strong>
{% endfor %}
</div>
{% if analytics.mappings %}
<br>
<div class="suggestions tfidf">
<h1>Contrast mappings suggestions (top 50 TF-IDF results) for <em>{{ results[0]['name']}}</em>: <br></h1>
<div class="">
{% for tfidf, word in analytics.mappings[:50] %}
{% if word == query %}
<strong class="word" style="font-size:{{ 100 + tfidf }}%;">{{ word }}</strong>
{% else %}
<strong class="word" style="font-size:{{ 100 + tfidf }}%;"><a href="/{{ lang}}/?q={{ word }}">{{ word }}</a></strong>
{% endif%}
{% endfor%}
</div>
<br>
<div><a class="contrast" href="/{{ lang}}/mappings/{{ results[0]['filename'].strip() | replace('.txt','') }}">Read more ◐</a></div>
<br>
<br>
</div>
{% endif%}
</div>
{% endblock %}

74
templates/fr/base.html

@@ -0,0 +1,74 @@
<!DOCTYPE html>
<html lang='fr'>
<head>
<meta charset="utf-8" />
<title>{% block title %}cyber/technofeminist cross-readings{% endblock %}</title>
<link type="text/css" rel="stylesheet" href="{{ url_for('static', filename='css/stylesheet.css')}}" />
<script type="text/javascript" src="{{ url_for('static', filename='js/jquery-3.3.1.min.js')}}"></script>
</head>
<body>
<div id="txt-list">
<p><em>Processing</em> and <em>cross-calculating</em> through the following manifestos:</p>
<ul>
{% for filename in filenames | sort %}
<li>
<a href="/{{ lang }}/manifesto/{{ filename | replace('.txt','') }}">{{ filename | prettyfilename }}</a>
<a class="contrast" href="/{{ lang }}/mappings/{{ filename | replace('.txt','') }}"></a>
</li>
{% endfor %}
</ul>
{% block txtlist %}
{% endblock %}
</div>
<div id="wrapper">
<div id="nav-wrapper">
<div id="colophon_button">
<a href="/en">EN</a> / FR
&nbsp;
<a href="/{{ lang }}/colophon">colophon</a>
</div>
<div id="logo">
<a href="/{{ lang }}/">cyber/technofeminist <br>cross-readings</a>
</div>
{% block search %}
<div id="search">
<form action="" method="GET">
<input id="query" name="q" value="{{query}}"/>
<input id="submit" type="submit" value="➜"/>
</form>
</div>
{% endblock %}
{% block nav %}
{% endblock %}
</div>
<div id="content">
{% block content %}
{% endblock %}
{% block results %}
{% endblock %}
{% block manifesto %}
{% endblock %}
{% block suggestions %}
{% endblock %}
</div>
</div>
</body>
<script>
// refresh when there is no (mouse or keyboard) activity for 5 minutes
var time = new Date().getTime();
$(document.body).bind("mousemove keypress", function(e) {
time = new Date().getTime();
});
function refresh() {
if(new Date().getTime() - time >= 300000){
console.log('refresh!');
window.location.href = '/fr/';
} else {
setTimeout(refresh, 300000);
}
}
setTimeout(refresh, 300000);
</script>
</html>

158
templates/fr/colophon.html

@@ -0,0 +1,158 @@
{% extends "fr/base.html" %}
{% block title %}{% endblock %}
{% block search %}
{% endblock %}
{% block content %}
<br>
<br>
<br>
<br>
<br>
<div id ="colophon">
<p>
La <em>cyber/technofeminist cross-reader</em> a été développée par <a href="http://manettaberends.nl/">Manetta Berends</a><br>
pour l’exposition <em>Computer Grrrls</em> à Paris (Mars - Juillet 2019).<br>
</p>
<br>
<br>
<p>
Commission par:<br />
Inke Arns (Hartware Medien Kunst Verein, Dortmund)<br>
Marie Lechner (La Gaîté Lyrique, Paris)<br />
<br />
Curation des manifestes Cyber/technoféministes par:<br>
Inke Arns<br>
Marie Lechner<br>
<br>
Traduction française: <br />
Julie Boschat-Thorez<br>
<br />
Polices d’écriture:<br />
Unifont<br />
Liberation Sans Narrow Bold<br />
<br />
Logiciel:<br />
Flask<br />
NLTK<br />
<br />
Copyleft:<br>
Le <em>cyber/technofeminist cross-reader</em> est un travail libre,<br>
vous pouvez le copier, le distribuer, et le modifier <br>
sous les termes de la <a href="http://artlibre.org/licence/lal/en/">Licence artistique libre</a>.
<br>
<br>
Code:<br>
<a href="https://git.vvvvvvaria.org/mb/cross-reader">https://git.vvvvvvaria.org/mb/cross-reader</a><br />
<br>
Ce projet a été réalisé avec le soutien du fond DICRéAM, Paris.
</p>
</div>
<br>
<div class="cross">
<br>
<p class="note">[Note sur la contextualisation]</p>
<p class="techfem">
La collection des manifestes cyber/technoféministes comprend les documents suivants:
<br><br>
<em>Manifeste de la Femme Futuriste</em> [FR]<br>
<em>The Manifesto of Futurist Woman</em> [EN] <br>
par Valentine de Saintpoint (1912)<br><br>
<em>S.C.U.M manifesto</em> [EN, FR]<br>
par Valerie Solanas (1967)<br><br>
<em>A Cyborg Manifesto</em> [EN] <br>
<em>Manifeste Cyborg</em> [FR] <br>
par Donna Haraway (1984)<br><br>
<em>RIOT GRRRL MANIFESTO</em> [EN] <br>
publié par Bikini Zine (1989)<br><br>
<em>Cyberfeminist manifesto for the 21st century</em> [EN, FR] <br>
par VNS Matrix (1991)<br><br>
<em>Bitch Mutant Manifesto</em> [EN] <br>
par VNS Matrix (1996)<br><br>
<em>Cyberfeminism is not</em> [EN, DE, NL, FR] <br>
par Old Boys Network (OBN) (1997)<br><br>
<em>Refugia</em> [EN] <br>
par SubRosa (2002)<br><br>
<em>Glitch Manifesto </em>[EN] <br>
par Rosa Menkman (2009)<br><br>
<em>Glitch Feminism Manifesto</em> [EN] <br>
par Legacy Russell (2012)<br><br>
<em>The Mundane Afrofuturist Manifesto</em> [EN] <br>
par Martine Syms (2013)<br><br>
<em>Wages for Facebook</em> [EN] <br>
par Laurel Ptak (2013)<br><br>
<em>A Feminist Server Manifesto </em>[EN] <br>
publié par Constant (2014)<br><br>
<em>Gynepunk Manifesto</em> [EN, ES, FR] <br>
par Gynepunk (2014)<br><br>
<em>tRANShACKfEMINISta</em> [EN, ES, IT] <br>
par Pechblenda Lab (2014)<br><br>
<em>Manifesto for the Gynecene</em> [EN] <br>
par Alexandra Pirici and Raluca Voinea (2015)<br><br>
<em>The 3D Additivist Manifesto</em> [EN] + other languages available<br>
par Morehshin Allahyari and Daniel Rourke (2015)<br><br>
<em>Xenofeminist manifesto</em> [EN, FR] + other languages available<br>
par Laboria Cuboniks (2015)<br><br>
<em>Feminist Principles of the Internet </em>[EN] <br>
Rédaction collective, coordonnée par la «Association for Progressive Communications» (APC) (2016)<br><br>
<em>Hackers of Resistance Manifesto</em> [EN] <br>
par HORS (2018)<br><br>
<em>Purple Noise Manifesto</em> [EN] <br>
par Cornelia Sollfrank (2018)<br><br>
<em>The Call for Feminist Data</em> [EN] <br>
par Caroline Sinders (2018)<br><br>
<em>Cyberwitches Manifesto </em>[EN, FR] <br>
par Lucile Haute (2019)<br>
<br>
<br>
</p>
<p class="tfidf">L’algorithme amorce des idées sur le comptage localisé de mots.
<br />
<br />
La partie IDF (de l’algorithme TF-IDF), rédigée par Karen Spärck Jones, crée un écosystème au sein duquel les valeurs obtenues dépendent de la présence d’autres mots. La suppression ou l’ajout d’un document affecte les relations entre l’ensemble des documents, car les calculs se basent sur leur totalité. Bien que les algorithmes de traitement de texte aient une approche fondamentalement brutale, puisque le langage n’y est considéré que comme un ensemble de données, l’algorithme TF-IDF met en œuvre une méthode de comptage contextualisante. Les valeurs accordées aux textes sont déterminées par le contexte créé via leur association.
</p>
<hr>
<div>
Avis à l'utilisateur concernant les documents protégés par le droit d'auteur dans cette collection
<br><br>
Certains des manifestes de cette collection sont protégés par la loi sur le droit d'auteur, lorsque le droit d'auteur appartient à des tiers. L'utilisation équitable n'autorise que certaines utilisations limitées du contenu. L'auteur de ce projet utilise le contenu d'une tierce partie dans le cadre d'une doctrine d'utilisation équitable et met à votre disposition un outil de navigation et de lecture croisée. Les contenus de tiers sont utilisés pour créer un point d'accès, pour les lire, les explorer et les étudier.
</div>
<br>
<br>
</div>
</div>
{% endblock %}

53
templates/fr/index.html

@@ -0,0 +1,53 @@
{% extends "fr/base.html" %}
{% block results %}
<div id="intro" class="cross">
<blockquote>Échanges entre une <em>collection de manifestes cyber/technoféministes</em>
<br>et <em>l’algorithme “Term Frequency – Inverse Document Frequency” (TF-IDF)</em>.</blockquote>
<br>
<br>
<p>La lecture transversale permet un type de lecture spécifique, ne poursuivant pas un ordre linéaire, selon un parcours choisi. La lecture devient un acte de création de relations et de tissage de connections, via la traversée de différents domaines.</p>
<p>Cette liseuse transversale cyber/technoféministe s’établit sur deux axes, alliant la lecture d’un corpus de textes et celle d’un outil technologique. Le processus de lecture englobe un ensemble de manifestes ainsi que l’algorithme utilisé pour ce faire.</p>
<p>Ces lectures transversales connectent ...</p>
<p class="tfidf">... l’algorithme TF-IDF, de l’anglais <em>Term Frequency Inverse Document Frequency</em></p>
<p class="techfem">... et une <em>collection de manifestes cyber et technoféministes</em></p>
<p class="tfidf">Le TF-IDF est un algorithme utilisé communément afin d’identifier les mots les plus importants au sein d’un document. Cet algorithme a été en partie développé par l’informaticienne Späcrk Jones dans les années 70. Il est devenu l’un des algorithmes les plus importants pour un grand nombre de moteurs de recherche en ligne tels que Yandex ou Google. L’algorithme transforme les documents textuels en listes, où les résultats de recherche sont triés. Il utilise un mode de comptage inversé, sensible aux variations présentes au sein de ces documents.</p>
<p class="techfem">Les manifestes cyber/technoféministes lient pensée féministe et technologie, en introduisant le concept de serveurs féministes, des figures cyborg, des cyber-sorcières, voire des plaidoyers à la faveur du glitch comme artefact culturel numérique. Cette collection, évidemment non exhaustive, rassemble divers documents technoféministes publiés entre 1912 et 2019. Bien que ces manifestes évoquent des questions et préoccupations diverses, ils convergent sur le plan de l’énergie déployée. Ils posent l’urgence d’établir un constat, prêts à être mis en oeuvre.
<br><br>
Par ailleurs, il convient de noter que Karen Spärck Jones défendait la place des femmes dans le milieu informatique. <em>“J’ai essayé de réfléchir un peu – mais c’est très décourageant! - à la façon de parvenir à ce que plus de femmes soient présentes en informatique. Globalement, chaque personne réfléchissant sur le sujet finit par être déprimée, car nous reculons plus que nous ne progressons.”</em> <sup><a href="https://ethw.org/Oral-History:Karen_Sp%C3%A4rck_Jones#On_Getting_More_Women_into_Computer_Science"></a></sup></p>
<p>Ces deux axes, celui de l’algorithme et celui des manifestes, opèrent ensemble. Ils se soutiennent et se renforcent mutuellement, créant de nouvelles dimensions de lectures via cette liseuse transversale.</p>
<p>L’algorithme TF-IDF génère une lecture transversale des manifestes tout en répondant à une requête. Il en ressort une liste de résultats autour du terme; un ensemble de déclarations, questions et préoccupations autour d’un même terme. Simultanément, l’algorithme interagit avec le format du manifeste : il est sensible aux puces typographiques, aux répétitions et aux mots uniques – des éléments qui caractérisent ces documents d’ordre déclaratif. L’algorithme donne la priorité aux formes textuelles très contrastées par rapport à celles ayant un style plus académique, à la répétition sur la diversité de vocabulaire, et à l’usage de mots uniques sur les mots plus communs.</p>
<p>Cet outil de lecture transversale est donc à considérer tel un exercice de lecture, à travers le champ commun de la pensée technoféministe et d’un outil de classification algorithmique.</p>
</div>
{% endblock %}
{% block suggestions %}
<br>
<h1>Suggestions de lectures croisées:</h1>
<div>
{% for word in suggestions %}
<strong class="query"><a href="/{{ lang}}/?q={{word}}">{{ word.strip() }}</a></strong>
{% endfor %}
</div>
<br>
<h1>Suggestions du mapping de contraste (top 100 des résultats TF-IDF):</h1>
<div>
{% for tfidf, word in mappings %}
<strong class="query" style="font-size:{{ 50 + tfidf / 5 }}%;"> <a href="/{{ lang }}/?q={{ word }}">{{ word }}</a> </strong>
{% endfor %}
</div>
<br>
<p>Pour en lire plus au sujet <a href="/{{ lang}}/mappings/{{ filenames[0] }}">de l’algorithme TF-IDF et du mapping de contraste</a>.</p>
<br>
<br>
{% endblock %}

20
templates/fr/manifesto.html

@ -0,0 +1,20 @@
{% extends "fr/base.html" %}
{% block title %}{% endblock %}
{% block search %}
{% endblock %}
{% block content %}
{% endblock %}
{% block manifesto %}
<div id="manifesto_content_wrapper">
{% if '.jpg' in link %}
<img src="{{ link }}" />
{% elif 'vimeo' in link %}
<iframe src="{{ link }}" width="600" height="360" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen></iframe>
{% else %}
<iframe id="manifesto_content" src="{{ link }}"></iframe>
{% endif%}
</div>
{% endblock %}

62
templates/fr/mappings-name.html

@ -0,0 +1,62 @@
{% extends "fr/base.html" %}
{% block title %}{% endblock %}
{% block search %}
{% endblock %}
{% block results %}
<div class="cross">
<p class="tfidf" style="margin-left: calc(50% + 1.5em);">
<code>
def tfidf(query, words, corpus):<br /><br>
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;# Term Frequency<br />
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;tf_count = 0<br />
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;for word in words:<br />
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;if query == word:<br />
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;tf_count += 1<br />
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;tf = tf_count/len(words)<br />
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<br />
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;# Inverse Document Frequency<br />
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;idf_count = 0<br />
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;for words in corpus:<br />
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;if query in words:<br />
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;idf_count += 1<br />
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<br />
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;idf = log(len(corpus)/idf_count)<br />
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<br />
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;tfidf_value = tf * idf<br />
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<br />
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;return tf_count, idf_count, tfidf_value
</code>
</p>
<br><br>
<p class="note">[Note sur les mappings de contraste]</p>
<p class="tfidf" style="float: right;margin-left:1em;">
L’algorithme TF-IDF, formaté ci-dessus dans le langage de programmation Python, intègre un niveau de contraste au sein du texte, sous une forme numérique. Les mots se démarquant le plus fortement sont considérés par l’algorithme comme les plus importants pour ce texte.
<br><br>
Ces mappings de contraste permettent une lecture simultanée des manifestes et de l’algorithme.
<br><br>
Les valeurs TF-IDF sont calculées en deux étapes. Premièrement, l’algorithme compte la fréquence <em>Term Frequency (TF)</em> en recherchant l’apparition d’un mot dans le texte, proportionnellement au nombre total de mots au sein de ce texte. Cette façon de mesurer la fréquence rend possible la comparaison entre des documents de longueur variable, par exemple entre le très long <em>Manifeste Cyborg</em> de Donna Haraway (1984) et le <em>Call for Feminist Data</em> de Caroline Sinders (2018), lui-même relativement succinct.
<br><br>
Lors de la seconde étape, l’algorithme établit un comptage comparatif avec tous les autres documents de l’ensemble donné, en utilisant la partie <em>Inverse Document Frequency (IDF)</em>. Cette partie de l’algorithme, qui constitue la contribution de Karen Spärck Jones, introduit une forme subtile d’inversion relative à travers tous les documents d’un ensemble donné. Plutôt que de se limiter à un calcul de fréquence au sein d’un document, Spärck Jones a proposé de comparer cette fréquence entre tous les documents. Cela signifie que lorsqu’un mot n’apparaît qu’au sein d’une quantité très restreinte de documents, sa valeur est augmentée.
<br><br>
En conséquence, des mots tels que <em>“le”</em>, <em>“elle”</em> ou <em>“il”</em> obtiendront une valeur très faible, puisqu’ils apparaissent fréquemment. Le terme <em>paranodal</em>, lui, n’apparaît que 4 fois dans <em>A Feminist Server Manifesto</em>. Il s’agit d’un terme très fort et spécifique, n’apparaissant dans aucun autre manifeste du corpus.
<br><br>
Un autre cas de figure est celui de <em>SCUM</em>. Bien que le mot <em>SCUM</em> ne soit pas le mot le plus utilisé dans le <em>SCUM Manifesto</em>, il s’agit du mot obtenant le score le plus élevé : en comparaison avec tous les autres manifestes, le mot <em>SCUM</em> est principalement utilisé dans cet ouvrage, ce qui a pour conséquence d’augmenter très fortement son score.
</p>
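Exemple chiffré, avec des valeurs purement illustratives (les décomptes exacts dépendent du corpus réel) :

from math import log

# valeurs hypothétiques : 4 occurrences dans un manifeste de 2000 mots,
# terme présent dans 1 seul document d'un corpus qui en compte 30
tf = 4 / 2000                       # 0.002
idf = log(30 / 1)                   # ~3.401
print(tf * idf)                     # ~0.0068 : un mot rare mais répété obtient un score élevé

# à l'inverse, un mot très courant ('elle' : 40 occurrences, présent dans les 30 documents)
print((40 / 2000) * log(30 / 30))   # 0.0 : l'IDF annule la fréquence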
</div>
<div id="mappings">
<h1>{{ manifesto | prettyfilename }}</h1>
{% for sentence in mappings %}
<p class="sentence">
{% for word, tfidf in sentence %}
<strong class="query" style="font-size:{{ 50 + tfidf }}%;"> <a href="/{{ lang }}/?q={{ word }}">{{ word }}</a> </strong>
{% endfor %}
</p>
{% endfor %}
</div>
{% endblock %}
{% block suggestions %}
{% endblock %}

20
templates/fr/mappings.html

@ -0,0 +1,20 @@
{% extends "fr/base.html" %}
{% block title %}{% endblock %}
{% block txtlist %}
{% endblock %}
{% block results %}
<h1>Contrast mappings, cross-reading with algorithmic results.</h1>
<div id="mappings">
{% for manifesto, items in mappings.items() %}
<h1>{{ manifesto }}</h1>
{% for tfidf, word in items %}
<strong class="query" style="font-size:{{ 50 + tfidf }}%;"> <a href="/{{ lang }}/?q={{ word }}">{{ word }}</a> </strong>
{% endfor %}
{% endfor %}
</div>
{% endblock %}
{% block suggestions %}
{% endblock %}

27
templates/fr/ordered.html

@ -0,0 +1,27 @@
{% extends "fr/base.html" %}
{% block title %}{% endblock %}
{% block txtlist %}
{% endblock %}
{% block results %}
<h1>lists</h1>
<div id="listing_per_manifesto" class="listings">
{% for manifesto, words in results.items()|sort %}
<span class="result">
<div class="ascii">
/\|\/\<br>
_)&nbsp;&nbsp;&nbsp;&nbsp;(__<br>
\_&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;_/ <br>
)&nbsp;&nbsp;&nbsp;&nbsp;\ <br>
\/\|\/
</div>
<h2>{{manifesto}}</h2>
{% for value, word in words %}
<strong style="font-size:{{value * 50000}}%;"><a href="{{ url_for('index')}}?q={{word}}">{{word}}</a></strong>
{% endfor %}
</span>
{% endfor %}
<hr>
</div>
{% endblock %}

118
templates/fr/results.html

@ -0,0 +1,118 @@
{% extends "fr/base.html" %}
{% block title %}{{ query }} - cyber/technofeminist cross-readings{% endblock %}
{% block nav %}
<div id="print" class="blink {{ connection }}">
<a href="?q={{ query }}&print=now"><img src="/static/images/epson.png"></a>
</div>
{% endblock %}
{% block results %}
{% if results == {} %}
<div id="notused">Ce mot n'a pas pu être trouvé.</div>
{% else %}
<div class="cross">
<p class="note">[Note sur les résultats de recherche]</p>
<p class="tfidf">Lorsqu’une requête est soumise, le <em>TF-IDF</em> commence à parcourir la totalité des manifestes de l’ensemble donné afin de vérifier si un mot y est utilisé, et si c’est le cas, de l’ajouter à la liste des résultats. Le tri au sein de la liste est effectué en fonction de l’importance du mot <strong>{{ query }}</strong> à l’intérieur de chaque manifeste.</p>
<p class="techfem">Les résultats de recherche sont des extraits, des déclarations, des citations ou des questions, reliés entre eux par un mot unique, formant un index de lecture croisée autour de <strong>{{ query }}</strong>. Les occurrences de <strong>{{ query }}</strong> dans chaque manifeste augmentent la complexité des relations qu’ils entretiennent. </p>
<br>
<br>
</div>
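Esquisse hypothétique (simplifiée, indépendante du code réel du projet) du tri des manifestes selon la valeur TF-IDF du mot recherché, à partir de la structure d’index écrite par tfidf.py :

import json

def trier_resultats(query, chemin='index.json'):
    # index[manifeste]['tfidf'][mot] : structure produite par tfidf.py
    with open(chemin) as f:
        index = json.load(f)
    scores = {m: infos['tfidf'].get(query, 0.0) for m, infos in index.items()}
    # ne garder que les manifestes où le mot apparaît, triés par importance décroissante
    return sorted((m for m, s in scores.items() if s > 0), key=scores.get, reverse=True)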
<div id="results">
<h1>Lecture transversale des manifestes selon l’axe de <strong class="query">{{ query }}</strong>:</h1>
<div class="results">
{% for x, manifesto in results.items() %}
{% for sentence in manifesto.html %}
<div id="{{ x }}_{{ loop.index }}" class="result">
<div class="title">
{{ manifesto.name }}
</div>
<div class="sentence">{{ sentence }}</div>
</div>
{% endfor %}
{% endfor %}
</div>
</div>
<br><br><br>
<div class="cross">
<p class="note">[Note on activating]</p>
<p class="tfidf">
L’algorithme <em>TF-IDF</em> est un catalyseur, générant des connexions multidimensionnelles à travers un ensemble de mots, qui constituent ici les données.
<br><br>
Tel un guide pour le lecteur, l’algorithme délivre un classement, en décidant de l’importance du terme de recherche au sein de chaque manifeste.
</p>
<p class="techfem">
Les <em>manifestes cyber et technoféministes</em> sont écrits et publiés dans un but d’application. Ils formulent des déclarations sur des intentions, des motivations et des points de vue spécifiques sur la technologie. Leurs styles d’écriture, aussi différents soient-ils, reposent souvent sur la mise en forme de déclarations courtes, voire militantes.
<br><br>
<em>«Rien ne devrait être admis comme figé, permanent ou «donné» – ni les conditions matérielles ni les formes sociales»</em> (Manifeste Xénoféministe), <br>
<em>«Voilà pourquoi aucune révolution ne doit lui rester étrangère.»</em> (Le manifeste de la femme futuriste)
</p>
</div>
{% endif %}
<br>
<br>
<br>
<br>
<br>
{% if analytics %}
<div id="analytics">
<h1>SUGGESTIONS</h1>
<p>La liseuse transversale est dotée d’une liberté de suggestion. Certaines de ses suggestions sont présélectionnées, notamment avec la liste de lectures croisées proposées. D’autres suggestions sont automatiquement générées, l’algorithme étant utilisé comme un outil ludique afin d’initier des connexions et d’explorer la dimensionnalité d’un mot unique.</p>
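À titre indicatif, une esquisse (hypothétique, le mécanisme réel du projet n’est pas reproduit ici) du tirage de suggestions à partir de la liste words.txt :

import random

def suggestions(n=5, chemin='words.txt'):
    # un mot par ligne dans words.txt (liste de lectures croisées proposées)
    mots = [ligne.strip() for ligne in open(chemin) if ligne.strip()]
    return random.sample(mots, min(n, len(mots)))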
<br>
{% if analytics.stemmer %}
<div id="similars" class="analytics stemmer">
<h2>Similaire mais différent de <em>{{ query }}</em>:</h2>
{% for word in analytics.stemmer %}
<strong class="word"><a href="/{{ lang}}/?q={{ word }}">{{ word }}</a></strong>
{% endfor%}
</div>
{% endif%}
</div> <!-- #analytics -->
{% endif%}
{% endblock %}
{% block suggestions %}
<div id="suggestions">
<div class="suggestions techfem">
<h1>Suggestions de lectures croisées:</h1>
{% for word in suggestions %}
<strong class="query"><a href="?q={{word}}">{{ word.strip() }}</a></strong>
{% endfor %}
</div>
{% if analytics.mappings %}
<br>
<div class="suggestions tfidf">
<h1>Suggestions du mapping de contraste (top 50 des résultats TF-IDF) pour <em>{{ results[0]['name']}}</em>: <br></h1>
<div class="">
{% for tfidf, word in analytics.mappings[:50] %}
{% if word == query %}
<strong class="word" style="font-size:{{ 100 + tfidf }}%;">{{ word }}</strong>
{% else %}
<strong class="word" style="font-size:{{ 100 + tfidf }}%;"><a href="/{{ lang}}/?q={{ word }}">{{ word }}</a></strong>
{% endif%}
{% endfor%}
</div>
<br>
<div><a class="contrast" href="/{{ lang}}/mappings/{{ results[0]['filename'].strip() | replace('.txt','') }}">Pour approfondir l’exploration du mapping de contraste appliqué au texte entier (<em>{{ results[0]['name'].strip() }}</em>) par ici ◐.</a></div>
<br>
<br>
</div>
{% endif%}
</div>
<!--
<h1>Request conditional fragments:</h1>
<div id="suggestions">
{% for word in conditionals %}
<strong class="query"><a href="?q={{ word }}&conditional=True">{{ word.strip() }}</a></strong>
{% endfor %}
</div> -->
{% endblock %}

27
templates/fr/results_lists.html

@ -0,0 +1,27 @@
{% extends "fr/base.html" %}
{% block title %}{{query}}{% endblock %}
{% block txtlist %}
<div id="more-descr">[<span class="asterix">*</span>] The algorithm is asked to not return more than 3 sentences from the same manifesto. There are more sentences that match this search query!</div>
{% endblock %}
{% block results %}
<h1>The results for the query "{{query}}" are:</h1>
<div id="results" class="lists">
{% if results == {} %}
<div>That word is not used in any of the manifestos.</div>
{% else %}
{% for _, manifesto in results.items() %}
<span class="result">
<h2 class="title">{{manifesto.name}}</h2>
<span class="list">
{% for i in range(manifesto.tf) %}
<strong style="font-size:{{manifesto.tfidf * 10000}}px;">{{query}}</strong>
{% endfor %}
</span>
</span>
{% endfor %}
<hr>
{% endif %}
</div>
{% endblock %}

124
tfidf.py

@ -0,0 +1,124 @@
import os, json, re
from math import log, exp
import nltk
from nltk import sent_tokenize
from nltk.tokenize import RegexpTokenizer
tokenizer = RegexpTokenizer(r'\w+') # initialize tokenizer
import pprint
pp = pprint.PrettyPrinter(indent=4)
def tfidf(query, words, corpus):
# Term Frequency
tf_count = 0
for word in words:
if query == word:
tf_count += 1
tf = tf_count/len(words)
# print('TF count:', tf_count)
# print('Total number of words:', len(words))
# print('TF - count/total', tf_count/len(words))
# Inverse Document Frequency
idf_count = 0
for words in corpus:
if query in words:
idf_count += 1
# print('count:', idf_count)
idf = log(len(corpus)/idf_count)
# print('Total number of documents:', len(corpus))
# print('documents/count', len(corpus)/idf_count)
# print('IDF - log(documents/count)', log(len(corpus)/idf_count))
tfidf_value = tf * idf
# print('TF-IDF:', tfidf_value)
return tf_count, idf_count, tfidf_value
def get_language(manifesto):
language = re.search(r'\[.*\]', manifesto, flags=re.IGNORECASE).group().replace('[','').replace(']','').lower()
return language
def load_text_files():
files = []
corpus = {}
sentences = {}
wordlists = {}
languages = {}
dir = 'txt'
for manifesto in sorted(os.listdir(dir)):
manifesto = manifesto.replace('.txt','')
# print('Manifesto:', manifesto)
language = get_language(manifesto)
if language == 'en+de+nl+fr': # exception for OBN manifesto
language = 'en'
languages[manifesto] = language
# print('Language:', language)
lines = open('{}/{}.txt'.format(dir, manifesto), "r").read() # list of lines in .txt file
lines = lines.replace('', '. ') # turn custom linebreaks into full-stops to let the tokenizer recognize them as end-of-lines
words = [word for word in tokenizer.tokenize(lines)] # all words of one manifesto, in reading order
wordlists[manifesto] = words
if not language in corpus.keys():
corpus[language] = []
corpus[language].append(words)
s = sent_tokenize(lines)
sentences[manifesto] = s
files.append(manifesto) # list of filenames
print('\n*txt files loaded*')
return files, corpus, sentences, wordlists, languages
def make_human_readable_name(manifesto):
year = re.match(r'^\d\d\d\d', manifesto).group()
name = manifesto.replace(year, '').replace('_', ' ').replace('-', ' ')
humanreadablename = '{} ({})'.format(name, year)
return humanreadablename
def create_index():
files, corpus, sentences, wordlists, languages = load_text_files()
index = {}
# index = {
# Fem manifesto : {
# 'tfidf' : {
# 'aap': 39.2,
# 'beer': 20.456,
# 'citroen': 3.21
# },
# 'tf' : {
# 'aap': 4,
# 'beer': 6,
# 'citroen': 2
# },
# 'name': 'Feminist Manifesto (2000)',
# 'language': 'en'
# }
# }
for manifesto in files:
print('---------')
print('Manifesto:', manifesto)
index[manifesto] = {}
index[manifesto]['sentences'] = sentences[manifesto]
language = languages[manifesto]
words = wordlists[manifesto]
for word in words:
tf_count, idf_count, tfidf_value = tfidf(word, words, corpus[language])
if 'tfidf' not in index[manifesto]:
index[manifesto]['tfidf'] = {}
index[manifesto]['tfidf'][word] = tfidf_value
# if 'tf' not in index[manifesto]:
# index[manifesto]['tf'] = {}
# index[manifesto]['tf'][word] = tf_count
index[manifesto]['name'] = make_human_readable_name(manifesto)
index[manifesto]['language'] = language
with open('index.json','w+') as out:
out.write(json.dumps(index, indent=4, sort_keys=True))
out.close()
print('*index created*')
# create_index()
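# Usage sketch (hypothetical, not called elsewhere in the project):
# read index.json back and return the n highest-scoring TF-IDF words of one manifesto.
def top_words(manifesto, n=10):
    with open('index.json') as f:
        tfidf_values = json.load(f)[manifesto]['tfidf']
    return sorted(tfidf_values, key=tfidf_values.get, reverse=True)[:n]
# print(top_words('<manifesto filename without .txt>', 10))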

52
words.txt

@ -0,0 +1,52 @@
différence
feminist
noise
hack
machine
code
local
error
virus
radical
cyborg
réseau
personal
technology
technologie
clitoris
matrix
difference
DIY
binary
hybrid
grrrl
girls
open
free
tools
fight
conflict
software
hardware
gender
network
connection
cyberspace
alien
imagination
future
emancipatory
banana
fiction
virtual
capitalism
bodies
glitch
power
automation
agency
travail
genre
intimate