cyber/technofeminist cross-reader
#!/usr/bin/env python3
import readings
from pprint import pprint
import re, sys
from escpos import escpos
from sys import stdin, stderr, stdout
from time import sleep
import nltk
from nltk.tokenize import RegexpTokenizer
tokenizer = RegexpTokenizer(r'\w+') # initialize tokenizer
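# The tokenizer above splits on word characters only, so punctuation and
# hyphens are dropped, e.g. (illustrative call):
# tokenizer.tokenize("cyber-feminism, noise!") -> ['cyber', 'feminism', 'noise']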

# Try to open the receipt printer device; fall back to the terminal when no printer is connected.
try:
    printerpath = '/dev/usb/lp0'
    stdout = open(printerpath, 'w')
    connected = True
    print('*receipt printer connected*')
except OSError:
    stdout = sys.__stdout__
    connected = False
    print('*no receipt printer connected*')
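
# The escpos module imported above is used as a dict of named ESC/POS control
# sequences (escpos['reset'], escpos['bold'], ...), defined in the project's
# own escpos.py. A minimal sketch of such a mapping, using standard ESC/POS
# byte values; the exact keys and values below are illustrative assumptions,
# not the contents of the project's file:
#
# escpos = {
#     'init_printer':   '\x1b\x40',      # ESC @ : initialize printer
#     'reset':          '\x1b\x40',
#     'bold':           '\x1b\x45\x01',  # ESC E 1 : emphasized mode on
#     'justify_center': '\x1b\x61\x01',  # ESC a 1 : centered justification
#     'paperfeed_1l':   '\x1b\x64\x01',  # ESC d 1 : print and feed one line
#     'papercut':       '\x1d\x56\x00',  # GS V 0 : full paper cut
# }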

def insert_linebreaks(string, linelength):
    # Wrap a string so that each line stays roughly within linelength characters.
    words = tokenizer.tokenize(string)
    count = 0
    tmp = ''
    new = ''
    for i, word in enumerate(words):
        count += len(word)
        if i == len(words) - 1:  # last word: flush the buffer
            tmp += word
            new += tmp
        elif count <= linelength:  # word still fits on the current line
            tmp += word + ' '
        else:  # line is full: emit it and start a new line with this word
            new += tmp + '\n'
            tmp = word + ' '
            count = len(word)
    return new
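
# Illustrative call of insert_linebreaks() (printNow() below uses a line
# length of 46 to match the receipt paper width):
# insert_linebreaks('a reading on cyberfeminism technology and noise', 20)
# returns 'a reading on \ncyberfeminism \ntechnology and noise'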

def printNow(query, results):
    if connected:
        print(escpos['reset'], file=stdout, flush=True)
        print(escpos['init_printer'], file=stdout, flush=True)
        print(escpos['select_international_character_set'], file=stdout, flush=True)
        print(escpos['margin_left'], file=stdout, flush=True)
        print(escpos['justify_center'], file=stdout, flush=True)
    for x, document in results.items():
        for n, sentence in enumerate(document['matches']):
            sentence = str(sentence)  # turn Flask Markup object back into a plain python string
            sentence = sentence.replace('\n', ' ').replace('  ', ' ')  # flatten newlines and collapse double spaces
            pattern = r'[\s\W\_]' + query + r'[\s\W\_\n]|^' + query + '|' + query + '$'
            match = re.search(pattern, sentence, flags=re.IGNORECASE)
            # print('match:', match)
            if match:
                match = match.group()
                sentence_splitted = re.compile(match).split(sentence)  # find the query in the sentence, and split the sentence up around it
                # print('sentence splitted:', sentence_splitted)
                for i, part in enumerate(sentence_splitted):
                    if n == 0:  # first sentence of this document
                        if i == 0:  # start of a new sentence
                            print('', file=stdout, flush=True)
                            # decorative ascii art separating documents
                            linebreak = ''' **
** /** **
//** /** **
**************
///**//**//**/
** /** //**
// /** //
// '''
                            print(linebreak, file=stdout, flush=True)  # print line break ascii art
                            print('\n', file=stdout, flush=True)
                            title = insert_linebreaks(document['name'], 46)
                            print(title, file=stdout, flush=True)  # print the document name
                            print('\n', file=stdout, flush=True)
                    # start of sentence
                    part = insert_linebreaks(part, 46)
                    print(part, escpos['bold'], file=stdout, flush=True)  # print current part of the sentence, then switch to bold
                    if i + 1 != len(sentence_splitted):  # if this part is not the last one
                        print(match, escpos['reset'], file=stdout, flush=True)  # print the query itself and reset the style
                    # line breaker after each sentence
                    if i + 1 == len(sentence_splitted):
                        print('\n | \n', file=stdout, flush=True)
    sleep(3)
    print(escpos['paperfeed_1l'], file=stdout, flush=True)
    print(escpos['paperfeed_1l'], file=stdout, flush=True)
    print(escpos['paperfeed_1l'], file=stdout, flush=True)
    sleep(3)
    print(escpos['papercut'], file=stdout, flush=True)
    print(escpos['reset'], file=stdout, flush=True)
# query = 'noise'
# results, _, _ = readings.request_results(query)
# pprint(results)
# printNow(query, results)
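
# The shape of `results` that printNow() expects, as inferred from the loops
# above: a dict of documents, each carrying a 'name' and a list of 'matches'
# (sentences containing the query), as returned by readings.request_results().
# A hypothetical example; the real data comes from readings.py:
#
# results = {
#     0: {
#         'name': 'document title',
#         'matches': ['a first sentence mentioning noise',
#                     'another sentence with noise in it'],
#     },
# }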