James MacQuillan committed
Commit 4aa0489 · 1 Parent(s): 7502b02
Files changed (1)
  1. app.py +23 -7
app.py CHANGED
@@ -29,6 +29,18 @@ huggingface_ef = embedding_functions.HuggingFaceEmbeddingFunction(
     api_key=hf_token,
     model_name="sentence-transformers/all-MiniLM-L6-v2"
 )
+
+#get their investor type
+investor_type_value = ""
+
+# Function to set the investor type
+def set_investor_type(investor_type):
+    global investor_type_value
+    investor_type_value = investor_type
+    return f"Investor type set to: {investor_type}"
+
+
+
 embedding_model = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2')
 # Define global variables
 BOT_AVATAR = 'https://automatedstockmining.org/wp-content/uploads/2024/08/south-west-value-mining-logo.webp'
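For context, a minimal sketch of how the new investor-type helper behaves on its own; only the code added above is assumed, and the print calls are illustrative:

# Module-level state: the value set here is what process_query later
# interpolates into its prompts. Because it is a plain global, it is shared
# by every visitor of the running process rather than stored per session.
investor_type_value = ""

def set_investor_type(investor_type):
    global investor_type_value
    investor_type_value = investor_type
    return f"Investor type set to: {investor_type}"

print(set_investor_type("long-term value investor"))  # Investor type set to: long-term value investor
print(investor_type_value)                            # long-term value investor

One consequence of this design: two users setting different investor types will overwrite each other's value; a per-session store such as gr.State would keep them separate, but that is not what this commit does.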
@@ -188,7 +200,7 @@ def process_query(user_input, history):
     # Step 1: Generate a search term based on the user query
     stream_search = client.chat_completion(
         model="Qwen/Qwen2.5-72B-Instruct",
-        messages=[{"role": "user", "content": f"Based on this chat history {history} the user's request '{user_input}', and this vector database {search_texts}, suggest a Google search term in a single line without specific dates; use 'this year', 'this month', etc. INCLUDE NOTHING IN YOUR RESPONSE EXCEPT THE RELEVANT SEARCH RESULT. EXAMPLE: USER: WHAT IS THE CURRENT PRICE OF COCA COLA STOCK. YOUR RESPONSE: WHAT IS THE CURRENT PRICE OF COCA COLA STOCK"}],
+        messages=[{"role": "user", "content": f"Based on this chat history {history} the user's request '{user_input}', and this vector database {search_texts}, suggest a Google search term in a single line without specific dates; use 'this year', 'this month', etc. INCLUDE NOTHING IN YOUR RESPONSE EXCEPT THE RELEVANT SEARCH RESULT. EXAMPLE: USER: WHAT IS THE CURRENT PRICE OF COCA COLA STOCK. YOUR RESPONSE: WHAT IS THE CURRENT PRICE OF COCA COLA STOCK. here is their investor type, you can use it to specialise searches for their investor type{investor_type_value}"}],
         max_tokens=400,
         stream=True
     )
@@ -211,7 +223,7 @@ def process_query(user_input, history):
     # Step 3: Generate a response using the search results
     response = client.chat_completion(
         model="Qwen/Qwen2.5-72B-Instruct",
-        messages=[{"role": "user", "content": f"Using the search results: {search_results_str} and chat history {history}, this vector database on health checks {retrieved_texts} answer the user's query '{user_input}' in a concise, precise way, using numerical data if available. ONLY GIVE ONE RESPONSE BACK, CONCISE OR DETAILED BASED ON THE USERS INPUT. "}],
+        messages=[{"role": "user", "content": f"Using the search results: {search_results_str} and chat history {history}, this vector database on health checks {retrieved_texts} answer the user's query '{user_input}' in a concise, precise way, using numerical data if available. GIVE DETAILED RESPONSES LIKE A STOCK ANALYST. THEY HAVE TOLD US HOW THEY INVEST SO WHEN YOU ANALYSE THINGS AND EXPLAIN, TAILOR IT TO THEM, AND REFERENCE THEIR TYPE IN YOUR ANSWERS - {investor_type_value}"}],
         max_tokens=3000,
         stream=True
     )
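Both completion calls in this diff (Step 1 and Step 3) pass stream=True, but the diff does not show how the chunks are consumed. The following is a sketch assuming the app uses huggingface_hub's InferenceClient; the token handling and the placeholder prompt are assumptions, not code from this commit:

import os
from huggingface_hub import InferenceClient

# Assumption: app.py's client construction is not shown in this diff.
hf_token = os.environ.get("HF_TOKEN")
client = InferenceClient(token=hf_token)

search_term = ""
stream_search = client.chat_completion(
    model="Qwen/Qwen2.5-72B-Instruct",
    messages=[{"role": "user", "content": "placeholder prompt"}],  # stands in for the f-string prompt above
    max_tokens=400,
    stream=True,
)
for chunk in stream_search:
    # Each streamed chunk carries an incremental delta; concatenating the
    # deltas rebuilds the full completion (here, the suggested search term).
    search_term += chunk.choices[0].delta.content or ""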
@@ -226,8 +238,9 @@ def process_query(user_input, history):
     yield final_response
 
 theme = gr.themes.Citrus(
-    primary_hue="blue",
-    neutral_hue="slate",
+    primary_hue="indigo",
+    secondary_hue="indigo",
+    neutral_hue="zinc",
 )
 
 examples = [
@@ -258,17 +271,20 @@ examples = [
 ]
 
 chatbot = gr.Chatbot(
-    label="IM.S",
+    label="IM.analyst",
     avatar_images=[None, BOT_AVATAR],
     show_copy_button=True,
     layout="panel",
     height=700
 )
-theme = gr.themes.Ocean()
+
 with gr.Blocks(theme=theme) as demo:
     with gr.Column():
         gr.Markdown("## quantineuron.com: IM.analyst - Building the Future of Investing")
-
+        investor_type_input = gr.Textbox(label="tell IM.analyst about how you invest", placeholder="Enter your investment style", interactive=True)
+        set_type_button = gr.Button("Set Investor Type")
+        set_type_button.click(set_investor_type, inputs=investor_type_input, outputs=None)
+
         with gr.Column(scale=3, min_width=600):
             chat_interface = gr.ChatInterface(
                 fn=process_query,
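The new controls wire the textbox and button to set_investor_type with outputs=None, so the helper's "Investor type set to: ..." confirmation string is discarded. Below is a minimal, self-contained sketch of the same wiring with one assumed change: a status Textbox to surface that confirmation (the status box is not part of this commit):

import gradio as gr

investor_type_value = ""

def set_investor_type(investor_type):
    global investor_type_value
    investor_type_value = investor_type
    return f"Investor type set to: {investor_type}"

with gr.Blocks() as demo:
    investor_type_input = gr.Textbox(label="tell IM.analyst about how you invest",
                                     placeholder="Enter your investment style", interactive=True)
    set_type_button = gr.Button("Set Investor Type")
    status = gr.Textbox(label="Status", interactive=False)  # assumed addition, not in the commit
    # Route the helper's return value into the status box; the commit itself passes outputs=None.
    set_type_button.click(set_investor_type, inputs=investor_type_input, outputs=status)

demo.launch()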
 