Update app.py
app.py CHANGED
@@ -39,8 +39,8 @@ with open('info.txt', 'r') as file:
 database_tag = database_content


-template = "Message: {message}\n\nConversation History: {history}\n\nDate and Time: {date_time}\n\nBitcoin Price: ${bitcoin_price}\n\nBitcoin history from 1-jan-2024 to today: {database_tag}\n\nYour system: {json_data}.\n\nResponse:"
-prompt = PromptTemplate(template=template, input_variables=["message","history", "date_time", "bitcoin_price", "database_tag", "json_data"])
+template = "Message: {message}\n\nSentiment Analysis: {sentiment}\n\nConversation History: {history}\n\nDate and Time: {date_time}\n\nBitcoin Price: ${bitcoin_price}\n\nBitcoin history from 1-jan-2024 to today: {database_tag}\n\nYour system: {json_data}.\n\nResponse:"
+prompt = PromptTemplate(template=template, input_variables=["message", "sentiment", "history", "date_time", "bitcoin_price", "database_tag", "json_data"])
 conversation_history = []

 MAX_HISTORY_LENGTH = 55
@@ -77,6 +77,8 @@ def submit():

     doc = nlp(user_input)
     tokens = [token.text for token in doc]
+
+    sentiment = TextBlob(user_input).sentiment

     # Add Spacy NLP processing here

@@ -97,7 +99,7 @@ def submit():
     history_stemmed_tokens = [ps.stem(token) for token in history_tokens]
     history_lemmatized_tokens = [lemmatizer.lemmatize(token) for token in history_tokens]

-    model_input = prompt.format(message=user_input, history="<br>".join(conversation_history), database_tag=database_content, date_time=current_time, bitcoin_price=bitcoin_price, json_data=json_data,history_tokens=history_tokens,history_stemmed_tokens=history_stemmed_tokens,history_lemmatized_tokens=history_lemmatized_tokens)
+    model_input = prompt.format(message=user_input, sentiment=sentiment, history="<br>".join(conversation_history), database_tag=database_content, date_time=current_time, bitcoin_price=bitcoin_price, json_data=json_data,history_tokens=history_tokens,history_stemmed_tokens=history_stemmed_tokens,history_lemmatized_tokens=history_lemmatized_tokens)

     response = llm(model_input, context="<br>".join(conversation_history))

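For reference, a minimal sketch of the added sentiment path. It assumes app.py already imports TextBlob and PromptTemplate (the import lines are not part of this diff) and uses a hypothetical user_input for illustration; only the new {sentiment} field is shown, not the full template.

from textblob import TextBlob
from langchain.prompts import PromptTemplate

user_input = "Bitcoin looks strong today!"  # hypothetical message, for illustration only

# TextBlob's .sentiment is a Sentiment namedtuple of (polarity, subjectivity),
# with polarity in [-1.0, 1.0] and subjectivity in [0.0, 1.0].
sentiment = TextBlob(user_input).sentiment
print(sentiment)  # Sentiment(polarity=<p>, subjectivity=<s>), values depend on TextBlob's lexicon

# The namedtuple is interpolated as-is, so the model sees a line like
# "Sentiment Analysis: Sentiment(polarity=<p>, subjectivity=<s>)".
template = "Message: {message}\n\nSentiment Analysis: {sentiment}\n\nResponse:"
prompt = PromptTemplate(template=template, input_variables=["message", "sentiment"])
print(prompt.format(message=user_input, sentiment=sentiment))

Because the diff passes the namedtuple itself to prompt.format, the prompt carries both polarity and subjectivity; passing sentiment.polarity instead would reduce the field to a single numeric score.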