artintel235 committed on
Commit
2c01309
·
verified ·
1 Parent(s): 033bc76

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -5
app.py CHANGED
@@ -22,16 +22,20 @@ def generate_response(messages, model_name, api_key): # Modified to accept 'mess
22
  )
23
  return stream
24
  except openai.APIError as e:
25
- st.error(f"OpenAI API Error with {model_name}: {e}")
 
26
  return None
27
  except openai.RateLimitError as e:
28
- st.error(f"OpenAI Rate Limit Error with {model_name}: {e}")
 
29
  return None
30
  except openai.AuthenticationError as e:
31
- st.error(f"OpenAI Authentication Error with {model_name}: {e}")
 
32
  return None
33
  except Exception as e:
34
- st.error(f"An unexpected error occurred with {model_name}: {e}")
 
35
  return None
36
 
37
  # Main Streamlit app
@@ -76,11 +80,11 @@ def main():
76
  full_response += chunk.choices[0].delta.content
77
  message_placeholder.markdown(full_response + "▌")
78
  message_placeholder.markdown(full_response)
 
79
  break # Break after successful response
80
  full_response = "" # Reset for the next model attempt
81
 
82
  if full_response:
83
- print(f"Using {model} for generation")
84
  # Add bot message to state
85
  st.session_state.messages.append({"role": "assistant", "content": full_response})
86
 
 
22
  )
23
  return stream
24
  except openai.APIError as e:
25
+ # Log the error for debugging, but don't display it in the UI
26
+ print(f"OpenAI API Error with {model_name}: {e}")
27
  return None
28
  except openai.RateLimitError as e:
29
+ # Log the error for debugging, but don't display it in the UI
30
+ print(f"OpenAI Rate Limit Error with {model_name}: {e}")
31
  return None
32
  except openai.AuthenticationError as e:
33
+ # Log the error for debugging, but don't display it in the UI
34
+ print(f"OpenAI Authentication Error with {model_name}: {e}")
35
  return None
36
  except Exception as e:
37
+ # Log the error for debugging, but don't display it in the UI
38
+ print(f"An unexpected error occurred with {model_name}: {e}")
39
  return None
40
 
41
  # Main Streamlit app
 
80
  full_response += chunk.choices[0].delta.content
81
  message_placeholder.markdown(full_response + "▌")
82
  message_placeholder.markdown(full_response)
83
+ print(f"Using {model} for generation")
84
  break # Break after successful response
85
  full_response = "" # Reset for the next model attempt
86
 
87
  if full_response:
 
88
  # Add bot message to state
89
  st.session_state.messages.append({"role": "assistant", "content": full_response})
90