cyber/technofeminist cross-reader
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

123 lines
3.8 KiB

#!/usr/bin/env python3
import readings
from pprint import pprint
import re, sys
from escpos import escpos
from sys import stdin, stderr, stdout
from time import sleep
import nltk
from nltk.tokenize import RegexpTokenizer
tokenizer = RegexpTokenizer(r'\w+') # initialize tokenizer
# Try to attach stdout to the USB receipt printer; fall back to the
# real terminal stdout when no printer device is present.
try:
    printerpath = '/dev/usb/lp0'
    stdout = open(printerpath, 'w')
    connected = True
    print('*receipt printer connected*')
except OSError:
    # Narrowed from a bare `except:` — opening a missing/busy device node
    # raises OSError (e.g. FileNotFoundError, PermissionError); a bare
    # except would also swallow KeyboardInterrupt/SystemExit.
    stdout = sys.__stdout__
    connected = False
    print('*no receipt printer connected*')
def insert_linebreaks(string, linelength):
    """Re-wrap *string* so each line carries at most ~linelength word characters.

    The text is reduced to its word tokens (punctuation is discarded, as the
    original r'\\w+' tokenizer did) and re-joined with single spaces, inserting
    a newline once the accumulated word-character count would exceed
    *linelength*. Spaces are not counted toward the limit (matching the
    original accounting).

    Fixes two bugs in the previous version:
    - the word that overflowed a line was silently dropped instead of being
      carried onto the next line;
    - the end-of-text check compared by value (`word == words[-1]`), so an
      earlier duplicate of the final word ended wrapping prematurely.

    Returns the wrapped string ('' for input with no word characters).
    """
    # Equivalent to nltk RegexpTokenizer(r'\w+').tokenize(string).
    words = re.findall(r'\w+', string)
    lines = []
    current = []   # words accumulated for the line being built
    count = 0      # word characters on the current line (spaces excluded)
    for word in words:
        # Break the line BEFORE adding a word that would overflow it,
        # so no word is ever lost.
        if current and count + len(word) > linelength:
            lines.append(' '.join(current))
            current = []
            count = 0
        current.append(word)
        count += len(word)
    if current:
        lines.append(' '.join(current))
    return '\n'.join(lines)
def printNow(query, results, results_count):
    # NOTE(review): leading indentation was lost in this copy of the file and
    # has been reconstructed from the control flow — verify against the
    # original before relying on the exact nesting.
    """Print the next batch (up to 5) of search results to the receipt printer.

    query         -- the search term; located in each sentence and printed bold.
    results       -- mapping of documents; each value is a dict with at least
                     'name' (title) and 'matches' (iterable of sentences,
                     Flask Markup objects).
    results_count -- index of the first result to print, or None to start at 0.

    Uses module globals: `connected`/`stdout` (printer state), `escpos`
    (control-code table), `tokenizer` via insert_linebreaks.
    Returns the updated results_count (reset to 0 when nothing was printed,
    so a later call starts over from the beginning).
    """
    if connected == True:
        # Printer preamble: reset, initialise, set margin, centre the text.
        print(escpos['reset'], file=stdout, flush=True)
        print(escpos['init_printer'], file=stdout, flush=True)
        # print(escpos['select_international_character_set'], file=stdout, flush=True)
        print(escpos['margin_left'], file=stdout, flush=True)
        print(escpos['justify_center'], file=stdout, flush=True)
    printed = False
    if results_count == None:
        results_count = 0
    # Print at most 5 results per call, continuing from results_count.
    results_count_max = results_count + 5
    current_count = 0  # absolute index of the sentence being considered
    for x, document in results.items():
        for n, sentence in enumerate(document['matches']):
            print('current_count', current_count)
            # Skip sentences already printed on earlier calls.
            if current_count == results_count:
                printed = True
                if results_count < results_count_max:
                    print('results_count', results_count)
                    results_count += 1
                    sentence = str(sentence) # Turn Flask Markup object back to normal python string
                    # NOTE(review): the second replace is a no-op here; it
                    # presumably collapsed double spaces before whitespace was
                    # mangled in this copy — confirm against the original.
                    sentence = sentence.replace('\n', ' ').replace(' ', ' ')
                    # Match the query as a standalone word: bounded by
                    # whitespace/non-word chars/underscore, or at start/end.
                    pattern = r'[\s\W\_]'+query+r'[\s\W\_\n]|^'+query+'|'+query+'$'
                    match = re.search(pattern, sentence, flags=re.IGNORECASE)
                    # print('match:', match)
                    if match:
                        match = match.group()
                        sentence_splitted = re.compile(match).split(sentence) # find the query in the sentence, and split the sentence up
                        # print('sentence splitted:', sentence_splitted)
                        for i, part in enumerate(sentence_splitted):
                            if n == 0: # first sentence of this document
                                if i == 0: # start of a new sentence
                                    # Document header: ascii-art divider + title.
                                    print('', file=stdout, flush=True)
                                    linebreak = ''' **
** /** **
//** /** **
**************
///**//**//**/
** /** //**
// /** //
// '''
                                    print(linebreak, file=stdout, flush=True) # print line break ascii art
                                    print('\n', file=stdout, flush=True)
                                    title = insert_linebreaks(document['name'], 40)
                                    print(title, file=stdout, flush=True) # print the document name
                                    print('\n', file=stdout, flush=True)
                            # start of sentence
                            part = insert_linebreaks(part, 40)
                            print(part, file=stdout, flush=True) # print current part of sentence
                            if i + 1 != len(sentence_splitted): # if this part is not the last one
                                # The query itself sits between the split parts;
                                # re-emit it in bold.
                                print(escpos['bold'], match, escpos['reset'], file=stdout, flush=True) # print query
                            # line breaker after each sentence
                            if i + 1 == len(sentence_splitted):
                                print('\n | \n', file=stdout, flush=True)
                    # Pause between results so the printer keeps up.
                    sleep(3)
            current_count += 1
    # End of batch: feed paper past the tear bar, then cut.
    print(escpos['paperfeed_1l'], file=stdout, flush=True)
    print(escpos['paperfeed_1l'], file=stdout, flush=True)
    print(escpos['paperfeed_1l'], file=stdout, flush=True)
    sleep(3)
    print(escpos['papercut'], file=stdout, flush=True)
    print(escpos['reset'], file=stdout, flush=True)
    # reset when all results are printed
    if printed == False:
        results_count = 0
        # printNow(query, results, results_count)
    return results_count
# query = 'noise'
# results, _, _ = readings.request_results(query)
# pprint(results)
# printNow(query, results, 0)