James MacQuillan committed
Commit 714c540 · 1 Parent(s): 89fe315
push
app.py
CHANGED
@@ -1,4 +1,3 @@
-from typing import final
 import gradio as gr
 import os
 import json
@@ -6,7 +5,6 @@ from bs4 import BeautifulSoup
 import requests
 from huggingface_hub import InferenceClient
 
-
 # Define global variables
 BOT_AVATAR = 'https://automatedstockmining.org/wp-content/uploads/2024/08/south-west-value-mining-logo.webp'
 hf_token = os.getenv("HF_TOKEN")
@@ -93,29 +91,36 @@ def search(query):
     return all_results
 
 def process_query(user_input, history):
-    gr.Info('
-    #
-
-
-
-
-
-
-    messages.append({'role': 'user', 'content': user_input})
-
-    # Perform the web search based on user input
-    search_results = search(user_input)
-    search_results_str = json.dumps(search_results)
-
+    gr.Info('ℹ️📈 interpreting your request', duration=4)
+    # Accumulate streamed content from the initial request
+    stream_search = client.chat_completion(
+        model="Qwen/Qwen2.5-72B-Instruct",
+        messages=[{"role": "user", "content": f"Based on this chat history {history} and the user's request '{user_input}', suggest a Google search term in a single line without assuming any specific dates (use 'this year', 'this month', etc.)"}],
+        max_tokens=400,
+        stream=True
+    )
 
-    #
+    # Accumulate content from the streamed response
+    streamed_search_query = ""
+    for chunk in stream_search:
+        content = chunk.choices[0].delta.content or ''
+        streamed_search_query += content
+
+    print("Search Query:", streamed_search_query)  # Debugging: check the final search term
+
+    # Perform the web search based on the accumulated query
+    search_results = search(streamed_search_query)
+    search_results_str = json.dumps(search_results)
+    gr.Info('ℹ️📈 thinking...', duration=3.5)
+    # Create the response request with Hugging Face using the search results
     response = client.chat_completion(
         model="Qwen/Qwen2.5-72B-Instruct",
-        messages=[{"role": "user", "content": f"YOU ARE IM.S, AN INVESTMENT CHATBOT BUILT BY automatedstockmining.org. Answer the user's request '{user_input}' using the following information: {search_results_str} and the chat history
+        messages=[{"role": "user", "content": f"YOU ARE IM.S, AN INVESTMENT CHATBOT BUILT BY automatedstockmining.org. Answer the user's request '{user_input}' using the following information: {search_results_str} and the chat history {history}. Provide a concise, direct answer in no more than 2-3 sentences, with appropriate emojis. If the user asks for a detailed analysis, generate up to 3000 tokens analyzing all trends and patterns. IF THE USER ASKS YOU WHO YOU ARE, JUST SAY THAT YOU ARE IM.S BUILT BY automatedstockmining.org"}],
         max_tokens=3000,
         stream=True
     )
 
+    # Accumulate the final response and stream it
     final_response = ""
     for chunk in response:
         content = chunk.choices[0].delta.content or ''
@@ -128,6 +133,7 @@ theme = gr.themes.Citrus(
 )
 
 examples = [
+
     ["whats the trending social sentiment like for Nvidia"],
     ["What's the latest news on Cisco Systems stock"],
     ["Analyze technical indicators for Adobe, are they presenting buy or sell signals"],
@@ -152,6 +158,7 @@ examples = [
     ["What is the latest guidance on revenue for Meta?"],
     ["What is the current beta of Amazon stock and how does it compare to the industry average?"],
     ["What are the top-rated ETFs for technology exposure this quarter?"]
+
 ]
 
 chatbot = gr.Chatbot(
@@ -159,12 +166,14 @@ chatbot = gr.Chatbot(
     avatar_images=[None, BOT_AVATAR],
     show_copy_button=True,
     layout="panel",
-    height
-
+    height=700
+)
+
+chat_interface = gr.ChatInterface(
+    theme=theme,
+    fn=process_query,
+    chatbot=chatbot,
+    examples=examples,
 )
-
-
-    fn=process_query,
-    chatbot=chatbot,
-    examples=examples,
-).launch()
+
+chat_interface.launch()
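For readers skimming the diff: the reworked process_query now makes two streamed chat_completion calls, one to turn the conversation into a search term and one to answer from the search results. Below is a minimal sketch of that pattern, not the Space's exact code; the client setup and the answer_with_search name are assumptions for illustration.

import os
import json
from huggingface_hub import InferenceClient

# Assumed setup: the Space reads the token the same way (hf_token = os.getenv("HF_TOKEN"))
client = InferenceClient(token=os.getenv("HF_TOKEN"))

def answer_with_search(user_input, history, search):
    # Stage 1: stream a one-line search query suggested by the model
    query_stream = client.chat_completion(
        model="Qwen/Qwen2.5-72B-Instruct",
        messages=[{"role": "user", "content": f"Based on this chat history {history} and the user's request '{user_input}', suggest a Google search term in a single line"}],
        max_tokens=400,
        stream=True,
    )
    search_query = "".join(chunk.choices[0].delta.content or "" for chunk in query_stream)

    # Stage 2: run the web search, then stream an answer grounded in the results
    results_str = json.dumps(search(search_query))
    answer_stream = client.chat_completion(
        model="Qwen/Qwen2.5-72B-Instruct",
        messages=[{"role": "user", "content": f"Answer '{user_input}' using this information: {results_str}"}],
        max_tokens=3000,
        stream=True,
    )
    partial = ""
    for chunk in answer_stream:
        partial += chunk.choices[0].delta.content or ""
        yield partial  # gr.ChatInterface renders each yielded partial string as it arrives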
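The tail of the diff also swaps the old inline ).launch() call for a named chat_interface. A self-contained sketch of that wiring, with a stub generator standing in for process_query and the theme omitted:

import gradio as gr

BOT_AVATAR = 'https://automatedstockmining.org/wp-content/uploads/2024/08/south-west-value-mining-logo.webp'

def stub_fn(user_input, history):
    # Placeholder for process_query; yielding partial strings streams them to the UI
    yield f"You asked: {user_input}"

chatbot = gr.Chatbot(
    avatar_images=[None, BOT_AVATAR],
    show_copy_button=True,
    layout="panel",
    height=700,
)

chat_interface = gr.ChatInterface(
    fn=stub_fn,
    chatbot=chatbot,
    examples=[["What's the latest news on Cisco Systems stock"]],
)

chat_interface.launch()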