daniilf committed on
Commit
fc63097
verified
1 Parent(s): ee10203

Updating to display requested error message

Browse files
Files changed (1) hide show
  1. pages/FS_Model.py +81 -13
pages/FS_Model.py CHANGED
@@ -182,6 +182,7 @@ def get_classification(client, deployment, user_input):
182
  )
183
 
184
  return completion.choices[0].message.content.strip()
 
185
  def process_input():
186
  try:
187
  current_model = st.session_state.selected_model
@@ -223,9 +224,10 @@ def process_input():
223
  return response.choices[0].message.content.strip()
224
  except Exception as e:
225
  if attempt == max_retries - 1:
226
- raise Exception(f"Failed to get response after {max_retries} attempts: {str(e)}")
227
- st.warning(f"Attempt {attempt + 1} failed, retrying...")
228
  time.sleep(1)
 
229
 
230
  def perform_rag_query(input_text, conversation_history):
231
  try:
@@ -243,8 +245,69 @@ def process_input():
243
  return safe_api_call(model_messages), relevant_docs
244
 
245
  except Exception as e:
246
- st.error(f"Error in RAG query: {str(e)}")
247
- return "Lo siento, hubo un error al procesar tu consulta. Por favor, intenta nuevamente.", ""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
248
 
249
  initial_response = None
250
  initial_docs = ""
@@ -355,15 +418,20 @@ def check_document_relevance(query, doc, client):
355
  Documento: {doc}
356
  Respuesta:"""
357
 
358
- response = client.chat.completions.create(
359
- model=deployment,
360
- messages=[{"role": "user", "content": few_shot_prompt}],
361
- max_tokens=3,
362
- temperature=0.1,
363
- top_p=0.9
364
- )
365
-
366
- return response.choices[0].message.content.strip().lower() == "sí"
 
 
 
 
 
367
 
368
  # In retrieve_relevant_documents function
369
  def retrieve_relevant_documents(vectorstore, query, conversation_history, client, top_k=3, score_threshold=0.5):
 
182
  )
183
 
184
  return completion.choices[0].message.content.strip()
185
+
186
  def process_input():
187
  try:
188
  current_model = st.session_state.selected_model
 
224
  return response.choices[0].message.content.strip()
225
  except Exception as e:
226
  if attempt == max_retries - 1:
227
+ # Return user-friendly message instead of raising exception
228
+ return "This question is not currently supported by the conversation agent or is being flagged by the AI algorithm as being outside its parameters. If you think the question should be answered, please inform the research team what should be added with justification and if available please provide links to resources to support further model training. Thank you."
229
  time.sleep(1)
230
+
231
 
232
  def perform_rag_query(input_text, conversation_history):
233
  try:
 
245
  return safe_api_call(model_messages), relevant_docs
246
 
247
  except Exception as e:
248
+ # Use standardized error message
249
+ return "This question is not currently supported by the conversation agent or is being flagged by the AI algorithm as being outside its parameters. If you think the question should be answered, please inform the research team what should be added with justification and if available please provide links to resources to support further model training. Thank you.", ""
250
+
251
+ # Update these sections too:
252
+ if model_config.get('uses_classification', False):
253
+ try:
254
+ classification = get_classification(client, deployment, user_input)
255
+
256
+ if 'classifications' not in st.session_state:
257
+ st.session_state.classifications = {}
258
+ st.session_state.classifications[len(st.session_state.messages[current_model]) - 1] = classification
259
+
260
+ if classification == "0":
261
+ initial_response, initial_docs = perform_rag_query(user_input, conversation_history)
262
+ else:
263
+ model_messages = [
264
+ {"role": "system", "content": PERSONA_PREFIX + EMOTIONAL_PROMPT}
265
+ ] + st.session_state.messages[current_model]
266
+ initial_response = safe_api_call(model_messages)
267
+
268
+ except Exception as e:
269
+ # Replace error message with standardized message
270
+ initial_response = "This question is not currently supported by the conversation agent or is being flagged by the AI algorithm as being outside its parameters. If you think the question should be answered, please inform the research team what should be added with justification and if available please provide links to resources to support further model training. Thank you."
271
+
272
+ # And also update the RAG models section:
273
+ if model_config.get('uses_rag', False):
274
+ try:
275
+ if not initial_response:
276
+ initial_response, initial_docs = perform_rag_query(user_input, conversation_history)
277
+
278
+ verification_docs = retrieve_relevant_documents(
279
+ st.session_state.vectorstore,
280
+ initial_response,
281
+ conversation_history,
282
+ client=client
283
+ )
284
+
285
+ combined_docs = initial_docs + "\nContexto de verificación adicional:\n" + verification_docs
286
+
287
+ verification_messages = [
288
+ {
289
+ "role": "system",
290
+ "content": f"Pregunta del paciente:{user_input} \nContexto: {combined_docs} \nRespuesta anterior: {initial_response}\n Verifique la precisi贸n m茅dica de la respuesta anterior y refine la respuesta seg煤n el contexto adicional."
291
+ }
292
+ ]
293
+
294
+ assistant_reply = safe_api_call(verification_messages)
295
+
296
+ except Exception as e:
297
+ # Replace error message with standardized message
298
+ assistant_reply = "This question is not currently supported by the conversation agent or is being flagged by the AI algorithm as being outside its parameters. If you think the question should be answered, please inform the research team what should be added with justification and if available please provide links to resources to support further model training. Thank you."
299
+ else:
300
+ try:
301
+ model_messages = [
302
+ {"role": "system", "content": model_config['prompt']}
303
+ ] + st.session_state.messages[current_model]
304
+
305
+ assistant_reply = safe_api_call(model_messages)
306
+
307
+ except Exception as e:
308
+ # Replace error message with standardized message
309
+ assistant_reply = "This question is not currently supported by the conversation agent or is being flagged by the AI algorithm as being outside its parameters. If you think the question should be answered, please inform the research team what should be added with justification and if available please provide links to resources to support further model training. Thank you."
310
+
311
 
312
  initial_response = None
313
  initial_docs = ""
 
418
  Documento: {doc}
419
  Respuesta:"""
420
 
421
+ try:
422
+ response = client.chat.completions.create(
423
+ model=deployment,
424
+ messages=[{"role": "user", "content": few_shot_prompt}],
425
+ max_tokens=3,
426
+ temperature=0.1,
427
+ top_p=0.9
428
+ )
429
+
430
+ return response.choices[0].message.content.strip().lower() == "sí"
431
+ except Exception as e:
432
+ # In case of error, default to false (not relevant)
433
+ print(f"Error in relevance check: {str(e)}")
434
+ return False
435
 
436
  # In retrieve_relevant_documents function
437
  def retrieve_relevant_documents(vectorstore, query, conversation_history, client, top_k=3, score_threshold=0.5):