daniilf committed on
Commit
80f09db
verified
1 Parent(s): fc63097

Updating to requested error message

Browse files
Files changed (1) hide show
  1. pages/_2S_Model.py +81 -13
pages/_2S_Model.py CHANGED
@@ -178,6 +178,7 @@ def get_classification(client, deployment, user_input):
178
  )
179
 
180
  return completion.choices[0].message.content.strip()
 
181
  def process_input():
182
  try:
183
  current_model = st.session_state.selected_model
@@ -219,9 +220,10 @@ def process_input():
219
  return response.choices[0].message.content.strip()
220
  except Exception as e:
221
  if attempt == max_retries - 1:
222
- raise Exception(f"Failed to get response after {max_retries} attempts: {str(e)}")
223
- st.warning(f"Attempt {attempt + 1} failed, retrying...")
224
  time.sleep(1)
 
225
 
226
  def perform_rag_query(input_text, conversation_history):
227
  try:
@@ -239,8 +241,69 @@ def process_input():
239
  return safe_api_call(model_messages), relevant_docs
240
 
241
  except Exception as e:
242
- st.error(f"Error in RAG query: {str(e)}")
243
- return "Lo siento, hubo un error al procesar tu consulta. Por favor, intenta nuevamente.", ""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
244
 
245
  initial_response = None
246
  initial_docs = ""
@@ -351,15 +414,20 @@ def check_document_relevance(query, doc, client):
351
  Documento: {doc}
352
  Respuesta:"""
353
 
354
- response = client.chat.completions.create(
355
- model=deployment,
356
- messages=[{"role": "user", "content": few_shot_prompt}],
357
- max_tokens=3,
358
- temperature=0.1,
359
- top_p=0.9
360
- )
361
-
362
- return response.choices[0].message.content.strip().lower() == "sí"
 
 
 
 
 
363
 
364
  # In retrieve_relevant_documents function
365
  def retrieve_relevant_documents(vectorstore, query, conversation_history, client, top_k=3, score_threshold=0.5):
 
178
  )
179
 
180
  return completion.choices[0].message.content.strip()
181
+
182
  def process_input():
183
  try:
184
  current_model = st.session_state.selected_model
 
220
  return response.choices[0].message.content.strip()
221
  except Exception as e:
222
  if attempt == max_retries - 1:
223
+ # Return user-friendly message instead of raising exception
224
+ return "This question is not currently supported by the conversation agent or is being flagged by the AI algorithm as being outside its parameters. If you think the question should be answered, please inform the research team what should be added with justification and if available please provide links to resources to support further model training. Thank you."
225
  time.sleep(1)
226
+
227
 
228
  def perform_rag_query(input_text, conversation_history):
229
  try:
 
241
  return safe_api_call(model_messages), relevant_docs
242
 
243
  except Exception as e:
244
+ # Use standardized error message
245
+ return "This question is not currently supported by the conversation agent or is being flagged by the AI algorithm as being outside its parameters. If you think the question should be answered, please inform the research team what should be added with justification and if available please provide links to resources to support further model training. Thank you.", ""
246
+
247
+ # Update these sections too:
248
+ if model_config.get('uses_classification', False):
249
+ try:
250
+ classification = get_classification(client, deployment, user_input)
251
+
252
+ if 'classifications' not in st.session_state:
253
+ st.session_state.classifications = {}
254
+ st.session_state.classifications[len(st.session_state.messages[current_model]) - 1] = classification
255
+
256
+ if classification == "0":
257
+ initial_response, initial_docs = perform_rag_query(user_input, conversation_history)
258
+ else:
259
+ model_messages = [
260
+ {"role": "system", "content": PERSONA_PREFIX + EMOTIONAL_PROMPT}
261
+ ] + st.session_state.messages[current_model]
262
+ initial_response = safe_api_call(model_messages)
263
+
264
+ except Exception as e:
265
+ # Replace error message with standardized message
266
+ initial_response = "This question is not currently supported by the conversation agent or is being flagged by the AI algorithm as being outside its parameters. If you think the question should be answered, please inform the research team what should be added with justification and if available please provide links to resources to support further model training. Thank you."
267
+
268
+ # And also update the RAG models section:
269
+ if model_config.get('uses_rag', False):
270
+ try:
271
+ if not initial_response:
272
+ initial_response, initial_docs = perform_rag_query(user_input, conversation_history)
273
+
274
+ verification_docs = retrieve_relevant_documents(
275
+ st.session_state.vectorstore,
276
+ initial_response,
277
+ conversation_history,
278
+ client=client
279
+ )
280
+
281
+ combined_docs = initial_docs + "\nContexto de verificación adicional:\n" + verification_docs
282
+
283
+ verification_messages = [
284
+ {
285
+ "role": "system",
286
+ "content": f"Pregunta del paciente:{user_input} \nContexto: {combined_docs} \nRespuesta anterior: {initial_response}\n Verifique la precisión médica de la respuesta anterior y refine la respuesta según el contexto adicional."
287
+ }
288
+ ]
289
+
290
+ assistant_reply = safe_api_call(verification_messages)
291
+
292
+ except Exception as e:
293
+ # Replace error message with standardized message
294
+ assistant_reply = "This question is not currently supported by the conversation agent or is being flagged by the AI algorithm as being outside its parameters. If you think the question should be answered, please inform the research team what should be added with justification and if available please provide links to resources to support further model training. Thank you."
295
+ else:
296
+ try:
297
+ model_messages = [
298
+ {"role": "system", "content": model_config['prompt']}
299
+ ] + st.session_state.messages[current_model]
300
+
301
+ assistant_reply = safe_api_call(model_messages)
302
+
303
+ except Exception as e:
304
+ # Replace error message with standardized message
305
+ assistant_reply = "This question is not currently supported by the conversation agent or is being flagged by the AI algorithm as being outside its parameters. If you think the question should be answered, please inform the research team what should be added with justification and if available please provide links to resources to support further model training. Thank you."
306
+
307
 
308
  initial_response = None
309
  initial_docs = ""
 
414
  Documento: {doc}
415
  Respuesta:"""
416
 
417
+ try:
418
+ response = client.chat.completions.create(
419
+ model=deployment,
420
+ messages=[{"role": "user", "content": few_shot_prompt}],
421
+ max_tokens=3,
422
+ temperature=0.1,
423
+ top_p=0.9
424
+ )
425
+
426
+ return response.choices[0].message.content.strip().lower() == "sí"
427
+ except Exception as e:
428
+ # In case of error, default to false (not relevant)
429
+ print(f"Error in relevance check: {str(e)}")
430
+ return False
431
 
432
  # In retrieve_relevant_documents function
433
  def retrieve_relevant_documents(vectorstore, query, conversation_history, client, top_k=3, score_threshold=0.5):