import csv
import spacy
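# NOTE (environment assumption, not part of the original script): spacy.load("en_core_web_sm")
# below only works if the small English model has been installed once, e.g. with:
#   python -m spacy download en_core_web_sm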
# open the CSV file and return its rows as a list of dictionaries
def call_trees(file):
    data = []
    with open(file, mode='r') as source:
        # read the CSV file row by row
        csvFile = csv.DictReader(source)
        # collect the contents of the CSV file
        for line in csvFile:
            data.append(line)
    return data
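# A rough sketch of the CSV columns this script relies on, inferred from the lookups
# in the main block below (the actual data_trees.csv may hold more columns, e.g. ha and ratio):
#
#   Forest, Tree_1, Tree_1_description, Tree_2, Tree_2_description,
#   Tree_3, Tree_3_description, CO2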
# open the text file with the dreams
def open_dreams(textfile):
    with open(textfile, mode='r') as dreams:
        dreaming = dreams.read()
    return dreaming
def part_of_speech(text):
    # load the small English pipeline
    nlp = spacy.load("en_core_web_sm")
    # run the text through spaCy
    doc = nlp(text)
    # keep every token
    tokens = []
    for token in doc:
        tokens.append(token)
    # collect the words per part-of-speech tag
    verbs = [token.text for token in doc if token.pos_ == "VERB"]
    nouns = [token.text for token in doc if token.pos_ == "NOUN"]
    adjectives = [token.text for token in doc if token.pos_ == "ADJ"]
    articles = [token.text for token in doc if token.pos_ == "DET"]
    return doc, tokens, verbs, nouns, adjectives, articles
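# A minimal usage sketch; the sample sentence is invented and the exact tags spaCy
# assigns to it may differ from this guess:
#
#   doc, tokens, verbs, nouns, adjectives, articles = part_of_speech("The old tree dreams quietly.")
#   # verbs      -> e.g. ['dreams']
#   # nouns      -> e.g. ['tree']
#   # adjectives -> e.g. ['old']
#   # articles   -> e.g. ['The']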
# convert spaCy token objects to plain strings
def convert_token_to_text(doc):
    new_text = []
    for token in doc:
        new_text.append(token.text)
    return new_text
# replace every word that appears in pos_list by spaces of the same length
def replace_pos(text, pos_list):
    position = 0
    for word in text:
        if word in pos_list:
            length = len(word)
            replacement = length * " "
            text[position] = replacement
        position += 1
    return text
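# Sketch of what replace_pos does to an invented token list:
#
#   replace_pos(["the", "tree", "dreams"], ["the"])
#   # -> ["   ", "tree", "dreams"]   (the matched word is erased, its width is kept)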
# put every word that appears in pos_list in upper case between ** markers
def highlight_word(text, pos_list):
    position = 0
    for word in text:
        if word in pos_list:
            replacement = "**" + word.upper() + "**"
            text[position] = replacement
        position += 1
    return text
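# Sketch of what highlight_word does to the same invented token list:
#
#   highlight_word(["the", "tree", "dreams"], ["tree"])
#   # -> ["the", "**TREE**", "dreams"]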
# surround every word that appears in pos_list with !* and $& markers
def in_between(text, pos_list):
    position = 0
    for word in text:
        if word in pos_list:
            replacement = " !* " + word + " $& "
            text[position] = replacement
        position += 1
    return text
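# Sketch of what in_between does to the same invented token list:
#
#   in_between(["the", "tree", "dreams"], ["tree"])
#   # -> ["the", " !* tree $& ", "dreams"]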
# # distortion of individual trees when the ratio is < 1
# def strong_distortion(text):
#     vowels = ["a", "e", "i", "o", "u"]
#     for letter in text:
#         if letter in vowels:
#             text = text.replace(letter, "!")
#     return text
# if you call the script by its own name, it will execute the commands below;
# otherwise it only makes the functions available to be imported elsewhere
if __name__ == '__main__':
    # --------------
    # create a list of dictionaries with Forest, ha, Tree_1, Tree_2, Tree_3, CO2 in tonnes, ratio
    file = "data_trees.csv"
    data = call_trees(file)
    # print(data)
    # open the file with the dreams
    textfile = "dreams_selection_till_p47.txt"
    dreaming = open_dreams(textfile)
    # print(dreaming)
    # choose a location and a tree
    print("These are locations where you can catch dreams near trees: \n")
    nr = 0
    for element in data:
        print(str(nr) + ' --- ' + element['Forest'])
        nr += 1
    print("\n")
    location = input("Where do you want to receive your dream? Type a number: \n")
    print("Thanks for choosing " + data[int(location)]['Forest'] + ".\n")
    print("There are 3 trees that offer dream services: \n")
    print("1" + ' --- ' + data[int(location)]['Tree_1'])
    print("2" + ' --- ' + data[int(location)]['Tree_2'])
    print("3" + ' --- ' + data[int(location)]['Tree_3'])
    print("\n")
    tree_number = input("Which tree do you prefer to dream with? Type a number: \n")
    selected_tree = "Tree_" + tree_number
    dream_tree = data[int(location)][selected_tree]
    print("\n_________________________________\n")
    # print the title and the description of the chosen tree
    title = "Dreaming with " + dream_tree + " in " + data[int(location)]['Forest']
    print(title)
    print("_________________________________\n")
    selected_description = selected_tree + "_description"
    description_tree = data[int(location)][selected_description]
    print(description_tree)
    print("\n_________________________________\n")
    template_text = ("Trees absorb CO2 from the air and generate oxygen. "
                     "In forests and parks, the concentration of trees is high and CO2 emissions are often low, "
                     "because traffic is nonexistent. The air quality for humans is therefore generally better. "
                     "The hypothesis exists that the high concentration of trees allows for more lucid dreaming. "
                     "It is said that a personal relationship to a tree can enhance lucid dreaming as well, "
                     "even if the tree is living in an urban area.")
    print(template_text)
    print("\n_________________________________\n")
    doc, tokens, verbs, nouns, adjectives, articles = part_of_speech(dreaming)
    # print("Verbs:", verbs)
    # print("Nouns:", nouns)
    # print("Adjectives:", adjectives)
    # print("Articles:", articles)
    # convert spaCy token objects to strings in a list
    tokenized_text = convert_token_to_text(doc)
    if float(data[int(location)]["CO2"]) < 1:
        # if the CO2 value is < 1:
        # replace articles and nouns by spaces of the same length
        article_text = replace_pos(tokenized_text, articles)
        final_text = replace_pos(article_text, nouns)
        print("ENJOY YOUR DREAMS ! \n")
        print("\n_________________________________\n")
        print(" ".join(final_text))
        print("\n_________________________________\n")
    elif float(data[int(location)]["CO2"]) > 110000:
        # if the CO2 value is > 110000: highlight verbs and nouns
        verb_text = highlight_word(tokenized_text, verbs)
        final_text = highlight_word(verb_text, nouns)
        print("ENJOY YOUR DREAMS ! \n")
        print("\n_________________________________\n")
        print(" ".join(final_text))
        print("\n_________________________________\n")
    else:
        # everything in between: mark the nouns with !* and $&
        final_text = in_between(tokenized_text, nouns)
        print("ENJOY YOUR DREAMS ! \n")
        print("\n_________________________________\n")
        print(" ".join(final_text))
        print("\n_________________________________\n")
    # three dream convertors depending on where the trees are located:
    # CO2 < 1, in between, or > 110000 (see the conditions above)
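    # To try the script, data_trees.csv and dreams_selection_till_p47.txt are expected to sit
    # next to this file (an assumption based on the relative paths above); the prompts then
    # ask for a forest and a tree by their number.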