chatbot / app.py
artintel235's picture
Update app.py
d72bbc5 verified
raw
history blame
1.89 kB
import streamlit as st
import openai

# Authenticate the OpenAI client from Streamlit's secrets store.
openai.api_key = st.secrets['API_KEY']

# Fallback chain: models are tried in this order when one is rate-limited.
MODELS = ["gpt-4", "gpt-3.5-turbo", "gpt-3.5-turbo-16k"]

# Keep the chat transcript alive across Streamlit script reruns.
if "conversation" not in st.session_state:
    st.session_state["conversation"] = []
# Function to get a response from the OpenAI API
def get_chat_response(prompt, model_index=0):
    """Send *prompt* to the OpenAI chat API, falling back through MODELS.

    Starts with MODELS[model_index]; when a model raises a rate-limit
    error, warns the user and moves on to the next one in the list.

    Args:
        prompt: The user's message text.
        model_index: Index into MODELS to start from (default 0).

    Returns:
        The assistant's reply text, or None when every model is
        rate-limited or any other error occurs.
    """
    # NOTE(review): only the latest prompt is sent — the displayed
    # conversation history never reaches the API, so the model has no
    # memory of earlier turns. Confirm whether that is intentional.
    # Iterate over the fallback chain instead of recursing: same order,
    # same messages, but flat control flow.
    for idx in range(model_index, len(MODELS)):
        try:
            response = openai.ChatCompletion.create(
                model=MODELS[idx],
                messages=[{"role": "user", "content": prompt}],
            )
            return response.choices[0].message['content']
        except openai.error.RateLimitError:
            if idx + 1 < len(MODELS):
                st.warning(f"Quota exceeded for {MODELS[idx]}. Switching to {MODELS[idx + 1]}.")
                # Fall through to the next loop iteration (next model).
            else:
                st.error("All models have exceeded their quota. Please try again later.")
                return None
        except Exception as e:
            st.error(f"An error occurred: {e}")
            return None
    # Reached only if model_index was already past the end of MODELS.
    return None
# Streamlit UI
st.title("ChatGPT with Model Fallback")

# Render every turn of the transcript stored in session state.
st.write("### Conversation History")
for entry in st.session_state.conversation:
    st.write(f"**{entry['role']}**: {entry['content']}")

# Input box for the next user message.
user_input = st.text_input("You: ", "")
if user_input:
    # Record the user's turn, then ask the model (with automatic
    # fallback) for a reply.
    st.session_state.conversation.append({"role": "User", "content": user_input})
    reply = get_chat_response(user_input)
    if reply:
        # Store the assistant's turn and echo it immediately.
        st.session_state.conversation.append({"role": "Assistant", "content": reply})
        st.write(f"**Assistant**: {reply}")