Update app.py
app.py
CHANGED
@@ -37,8 +37,8 @@ with open('ai_chatbot_data.json', 'r') as file:
     json_data = json.load(file)
 
 # Updated prompt template for Bitcoin trading
-template = "User Message: {message}\n\nUser Sentiment: {sentiment}\n\nConversation now: {history}\n\nDate and Time: {date_time}\n\nBitcoin Price: ${bitcoin_price}\n\nBTC price from 1 jan 2024 to {date_time} (DATE,OPEN,HIGH,LOW,CLOSE,ADJ CLOSE,VOLUME<br/>): {
-prompt = PromptTemplate(template=template, input_variables=["message", "sentiment", "history", "date_time", "bitcoin_price", "
+template = "User Message: {message}\n\nUser Sentiment: {sentiment}\n\nConversation now: {history}\n\nDate and Time: {date_time}\n\nBitcoin Price: ${bitcoin_price}\n\nBTC price from 1 jan 2024 to {date_time} (DATE,OPEN,HIGH,LOW,CLOSE,ADJ CLOSE,VOLUME<br/>): {result}\n\nAI System Data: {json_data}\n\nResponse:"
+prompt = PromptTemplate(template=template, input_variables=["message", "sentiment", "history", "date_time", "bitcoin_price", "result", "json_data"])
 conversation_history = []
 
 MAX_HISTORY_LENGTH = 55
@@ -113,7 +113,7 @@ def submit():
     history_lemmatized_tokens = [lemmatizer.lemmatize(token) for token in history_tokens]
 
     model_input = prompt.format(message=user_input, sentiment=sentiment, history="\n".join(conversation_history),
-
+                                result=result, date_time=current_time, bitcoin_price=bitcoin_price, json_data=json_data)
 
     response = llm(model_input, context="<br>".join(conversation_history))
 
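The two hunks above wire two new variables, result (the BTC OHLCV history) and json_data (the data loaded from ai_chatbot_data.json), into the prompt and into the prompt.format call inside submit(). Below is a minimal standalone sketch of that wiring, assuming the standard LangChain PromptTemplate import; the example input values are placeholders, not values from app.py, since the app builds them per request.

# Sketch of the updated prompt wiring (placeholder inputs; app.py supplies
# these per request from the live BTC feed and ai_chatbot_data.json).
from langchain.prompts import PromptTemplate

template = (
    "User Message: {message}\n\n"
    "User Sentiment: {sentiment}\n\n"
    "Conversation now: {history}\n\n"
    "Date and Time: {date_time}\n\n"
    "Bitcoin Price: ${bitcoin_price}\n\n"
    "BTC price from 1 jan 2024 to {date_time} "
    "(DATE,OPEN,HIGH,LOW,CLOSE,ADJ CLOSE,VOLUME<br/>): {result}\n\n"
    "AI System Data: {json_data}\n\n"
    "Response:"
)
prompt = PromptTemplate(
    template=template,
    input_variables=["message", "sentiment", "history", "date_time",
                     "bitcoin_price", "result", "json_data"],
)

# Every input_variable must be supplied at format time, otherwise the
# underlying str.format raises KeyError.
model_input = prompt.format(
    message="Should I buy BTC today?",
    sentiment="positive",
    history="",
    date_time="2024-03-01 12:00",
    bitcoin_price=62000,
    result="2024-01-01,42280.2,42750.1,41990.3,42569.8,42569.8,18340000000",
    json_data={"persona": "Bitcoin trading assistant"},
)
print(model_input)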
@@ -143,7 +143,7 @@ if __name__ == "__main__":
         repo_id=repo_id,
         model_kwargs={
             "temperature": 0.5,
-            "max_new_tokens":
+            "max_new_tokens": 512,
             "top_p": 0.5,
             "repetition_penalty": 1.2,
             "num_beams": 3,
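The final hunk fills in the missing max_new_tokens value. The diff only shows the keyword arguments, so the sketch below assumes the llm is LangChain's HuggingFaceHub wrapper and uses a placeholder repo_id; only the model_kwargs values are taken from app.py.

# Generation settings from the final hunk. HuggingFaceHub reads the API token
# from the HUGGINGFACEHUB_API_TOKEN environment variable.
from langchain.llms import HuggingFaceHub

repo_id = "mistralai/Mistral-7B-Instruct-v0.2"  # placeholder; not taken from app.py

llm = HuggingFaceHub(
    repo_id=repo_id,
    model_kwargs={
        "temperature": 0.5,         # moderate sampling randomness
        "max_new_tokens": 512,      # cap on generated tokens, added by this commit
        "top_p": 0.5,               # nucleus sampling cutoff
        "repetition_penalty": 1.2,  # discourage repeated phrases
        "num_beams": 3,             # beam search width
    },
)

# submit() then calls the model with the formatted prompt, e.g.:
# response = llm(model_input)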