daniilf committed on
Commit
ee10203
verified
1 Parent(s): 3672495

Update pages/FSR_Model.py

Browse files
Files changed (1) hide show
  1. pages/FSR_Model.py +82 -13
pages/FSR_Model.py CHANGED
@@ -181,6 +181,7 @@ def get_classification(client, deployment, user_input):
181
  )
182
 
183
  return completion.choices[0].message.content.strip()
 
184
  def process_input():
185
  try:
186
  current_model = st.session_state.selected_model
@@ -222,9 +223,10 @@ def process_input():
222
  return response.choices[0].message.content.strip()
223
  except Exception as e:
224
  if attempt == max_retries - 1:
225
- raise Exception(f"Failed to get response after {max_retries} attempts: {str(e)}")
226
- st.warning(f"Attempt {attempt + 1} failed, retrying...")
227
  time.sleep(1)
 
228
 
229
  def perform_rag_query(input_text, conversation_history):
230
  try:
@@ -242,8 +244,69 @@ def process_input():
242
  return safe_api_call(model_messages), relevant_docs
243
 
244
  except Exception as e:
245
- st.error(f"Error in RAG query: {str(e)}")
246
- return "Lo siento, hubo un error al procesar tu consulta. Por favor, intenta nuevamente.", ""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
247
 
248
  initial_response = None
249
  initial_docs = ""
@@ -354,15 +417,21 @@ def check_document_relevance(query, doc, client):
354
  Documento: {doc}
355
  Respuesta:"""
356
 
357
- response = client.chat.completions.create(
358
- model=deployment,
359
- messages=[{"role": "user", "content": few_shot_prompt}],
360
- max_tokens=3,
361
- temperature=0.1,
362
- top_p=0.9
363
- )
364
-
365
- return response.choices[0].message.content.strip().lower() == "sí"
 
 
 
 
 
 
366
 
367
  # In retrieve_relevant_documents function
368
  def retrieve_relevant_documents(vectorstore, query, conversation_history, client, top_k=3, score_threshold=0.5):
 
181
  )
182
 
183
  return completion.choices[0].message.content.strip()
184
+
185
  def process_input():
186
  try:
187
  current_model = st.session_state.selected_model
 
223
  return response.choices[0].message.content.strip()
224
  except Exception as e:
225
  if attempt == max_retries - 1:
226
+ # Return user-friendly message instead of raising exception
227
+ return "This question is not currently supported by the conversation agent or is being flagged by the AI algorithm as being outside its parameters. If you think the question should be answered, please inform the research team what should be added with justification and if available please provide links to resources to support further model training. Thank you."
228
  time.sleep(1)
229
+
230
 
231
  def perform_rag_query(input_text, conversation_history):
232
  try:
 
244
  return safe_api_call(model_messages), relevant_docs
245
 
246
  except Exception as e:
247
+ # Use standardized error message
248
+ return "This question is not currently supported by the conversation agent or is being flagged by the AI algorithm as being outside its parameters. If you think the question should be answered, please inform the research team what should be added with justification and if available please provide links to resources to support further model training. Thank you.", ""
249
+
250
+ # Update these sections too:
251
+ if model_config.get('uses_classification', False):
252
+ try:
253
+ classification = get_classification(client, deployment, user_input)
254
+
255
+ if 'classifications' not in st.session_state:
256
+ st.session_state.classifications = {}
257
+ st.session_state.classifications[len(st.session_state.messages[current_model]) - 1] = classification
258
+
259
+ if classification == "0":
260
+ initial_response, initial_docs = perform_rag_query(user_input, conversation_history)
261
+ else:
262
+ model_messages = [
263
+ {"role": "system", "content": PERSONA_PREFIX + EMOTIONAL_PROMPT}
264
+ ] + st.session_state.messages[current_model]
265
+ initial_response = safe_api_call(model_messages)
266
+
267
+ except Exception as e:
268
+ # Replace error message with standardized message
269
+ initial_response = "This question is not currently supported by the conversation agent or is being flagged by the AI algorithm as being outside its parameters. If you think the question should be answered, please inform the research team what should be added with justification and if available please provide links to resources to support further model training. Thank you."
270
+
271
+ # And also update the RAG models section:
272
+ if model_config.get('uses_rag', False):
273
+ try:
274
+ if not initial_response:
275
+ initial_response, initial_docs = perform_rag_query(user_input, conversation_history)
276
+
277
+ verification_docs = retrieve_relevant_documents(
278
+ st.session_state.vectorstore,
279
+ initial_response,
280
+ conversation_history,
281
+ client=client
282
+ )
283
+
284
+ combined_docs = initial_docs + "\nContexto de verificación adicional:\n" + verification_docs
285
+
286
+ verification_messages = [
287
+ {
288
+ "role": "system",
289
+ "content": f"Pregunta del paciente:{user_input} \nContexto: {combined_docs} \nRespuesta anterior: {initial_response}\n Verifique la precisi贸n m茅dica de la respuesta anterior y refine la respuesta seg煤n el contexto adicional."
290
+ }
291
+ ]
292
+
293
+ assistant_reply = safe_api_call(verification_messages)
294
+
295
+ except Exception as e:
296
+ # Replace error message with standardized message
297
+ assistant_reply = "This question is not currently supported by the conversation agent or is being flagged by the AI algorithm as being outside its parameters. If you think the question should be answered, please inform the research team what should be added with justification and if available please provide links to resources to support further model training. Thank you."
298
+ else:
299
+ try:
300
+ model_messages = [
301
+ {"role": "system", "content": model_config['prompt']}
302
+ ] + st.session_state.messages[current_model]
303
+
304
+ assistant_reply = safe_api_call(model_messages)
305
+
306
+ except Exception as e:
307
+ # Replace error message with standardized message
308
+ assistant_reply = "This question is not currently supported by the conversation agent or is being flagged by the AI algorithm as being outside its parameters. If you think the question should be answered, please inform the research team what should be added with justification and if available please provide links to resources to support further model training. Thank you."
309
+
310
 
311
  initial_response = None
312
  initial_docs = ""
 
417
  Documento: {doc}
418
  Respuesta:"""
419
 
420
+ try:
421
+ response = client.chat.completions.create(
422
+ model=deployment,
423
+ messages=[{"role": "user", "content": few_shot_prompt}],
424
+ max_tokens=3,
425
+ temperature=0.1,
426
+ top_p=0.9
427
+ )
428
+
429
+ return response.choices[0].message.content.strip().lower() == "sí"
430
+ except Exception as e:
431
+ # In case of error, default to false (not relevant)
432
+ print(f"Error in relevance check: {str(e)}")
433
+ return False
434
+
435
 
436
  # In retrieve_relevant_documents function
437
  def retrieve_relevant_documents(vectorstore, query, conversation_history, client, top_k=3, score_threshold=0.5):