@@ -51,11 +51,12 @@ def analysis(the_word, file_name):
     content = f.read()
     sent_tokens = sent_tokenize(content)
     new_sent_tokens = []
+    # the_word = the_word.lower()
     re_word = r"\b" + re.escape(the_word) + r"\b"
     # print(re_word)
     # print(the_word)
     for sent_token in sent_tokens:
-        if re.search(re_word, sent_token, re.IGNORECASE):
+        if re.search(re_word, sent_token):
             new_sent_tokens.append({'id': id, 'sentence': sent_token.replace('\n', ' ').strip("'<>()“”")})
     if the_word in sentences_w_word:  # if this is not the first iteration
         previous_sent_tokens = sentences_w_word[the_word]
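
For reference, a minimal standalone sketch (not part of the PR) of the two case-handling options that appear in this hunk: the `re.IGNORECASE` flag on the old `re.search` call, and the lowercasing of `the_word` that is left commented out. The sample word and sentence below are made up for illustration.

```python
import re

the_word = "Python"
sentence = "python is dynamically typed.\n"

# Word-boundary pattern built the same way as in the hunk.
re_word = r"\b" + re.escape(the_word) + r"\b"

# Case-insensitive match via the re.IGNORECASE flag.
print(bool(re.search(re_word, sentence, re.IGNORECASE)))  # True

# Plain, case-sensitive match: "Python" does not match "python".
print(bool(re.search(re_word, sentence)))  # False

# Lowercasing only the search term (the commented-out alternative) helps
# only when the text it is matched against is lowercase as well.
re_word_lower = r"\b" + re.escape(the_word.lower()) + r"\b"
print(bool(re.search(re_word_lower, sentence)))  # True for this sentence
```

With the flag dropped in this hunk, matching is case-sensitive unless `the_word` and the text are normalized before the search.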