# --- Web-viewer scrape residue (not Python code); commented out so the file parses ---
# Spaces:
# Sleeping
# Sleeping
# File size: 1,894 Bytes
# b5ad4c9 d72bbc5 b5ad4c9 d72bbc5 d4e8e5e d72bbc5 d4e8e5e d72bbc5 d4e8e5e b5ad4c9 d72bbc5 d4e8e5e d72bbc5 |
# 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 |
import streamlit as st
import openai
# Set up the OpenAI API key from Streamlit's secrets store
# (expects an `API_KEY` entry in .streamlit/secrets.toml).
openai.api_key = st.secrets['API_KEY']
# Define the models in order of preference; later entries are
# fallbacks tried when an earlier model hits its rate/quota limit.
MODELS = ["gpt-4", "gpt-3.5-turbo", "gpt-3.5-turbo-16k"]
# Initialize session state to store conversation history so it
# survives Streamlit's script reruns (one rerun per interaction).
if "conversation" not in st.session_state:
    st.session_state.conversation = []
# Query the OpenAI chat API, falling back through MODELS on quota errors.
def get_chat_response(prompt, model_index=0):
    """Return the model's reply to *prompt*, or None on failure.

    Sends *prompt* as a single user message to ``MODELS[model_index]``.
    On a rate-limit (quota) error, recurses with the next model in
    MODELS until the list is exhausted. Any other error is reported
    via ``st.error`` and None is returned.
    """
    try:
        completion = openai.ChatCompletion.create(
            model=MODELS[model_index],
            messages=[{"role": "user", "content": prompt}],
        )
    except openai.error.RateLimitError:
        # Current model is out of quota — advance to the next one, if any.
        if model_index + 1 >= len(MODELS):
            st.error("All models have exceeded their quota. Please try again later.")
            return None
        st.warning(f"Quota exceeded for {MODELS[model_index]}. Switching to {MODELS[model_index + 1]}.")
        return get_chat_response(prompt, model_index + 1)
    except Exception as e:
        # Surface any other API/network failure to the user.
        st.error(f"An error occurred: {e}")
        return None
    return completion.choices[0].message['content']
# ---- Streamlit UI ----
st.title("ChatGPT with Model Fallback")

# Replay the stored conversation so history survives script reruns.
st.write("### Conversation History")
for message in st.session_state.conversation:
    rendered = f"**{message['role']}**: {message['content']}"
    st.write(rendered)

# Text box for the next user message; empty string means "nothing typed".
user_input = st.text_input("You: ", "")
# Handle a submitted message: record it, query the model, show the reply.
# NOTE(fix): the original final line ended with a stray " |" (copy/paste
# residue), which was a syntax error; it has been removed.
if user_input:
    # Add user input to conversation history (display-cased role label).
    st.session_state.conversation.append({"role": "User", "content": user_input})

    # Get a response, falling back through MODELS on quota errors.
    response = get_chat_response(user_input)

    # get_chat_response returns None on failure; only record real replies.
    if response:
        # Add model response to conversation history, then display it.
        st.session_state.conversation.append({"role": "Assistant", "content": response})
        st.write(f"**Assistant**: {response}")