"""Streamlit chatbot with OpenAI model fallback and streamed responses."""

import streamlit as st
import openai
import os


def get_api_key():
    """Return the OpenAI API key from Streamlit secrets, or None if missing."""
    try:
        return st.secrets["API_KEY"]
    except KeyError:
        st.error("API_KEY not found in Streamlit secrets. Please add it.")
        return None


def generate_response(prompt, model_name, api_key, extra_instructions="", history=None):
    """Request a streamed chat completion from the given model.

    Args:
        prompt: The latest user message.
        model_name: OpenAI model identifier to try.
        api_key: API key used to authenticate the request.
        extra_instructions: Text appended to the prompt (e.g. persona hints).
        history: Optional list of prior {"role", "content"} message dicts,
            sent ahead of the prompt so the model has conversational context.

    Returns:
        The streaming response iterator on success, or None on any API error
        (an error banner is shown via st.error in that case).
    """
    try:
        # Pass the key to this client explicitly rather than mutating the
        # process-wide module global `openai.api_key`.
        client = openai.OpenAI(api_key=api_key)
        messages = list(history) if history else []
        messages.append(
            {"role": "user", "content": prompt + "\n" + extra_instructions}
        )
        stream = client.chat.completions.create(
            model=model_name,
            messages=messages,
            stream=True,
        )
        return stream
    # NOTE: AuthenticationError and RateLimitError are subclasses of APIError,
    # so they must be caught BEFORE it — otherwise these handlers are
    # unreachable (the original listed APIError first).
    except openai.AuthenticationError as e:
        st.error(f"OpenAI Authentication Error with {model_name}: {e}")
        return None
    except openai.RateLimitError as e:
        st.error(f"OpenAI Rate Limit Error with {model_name}: {e}")
        return None
    except openai.APIError as e:
        st.error(f"OpenAI API Error with {model_name}: {e}")
        return None
    except Exception as e:
        st.error(f"An unexpected error occurred with {model_name}: {e}")
        return None


def main():
    """Render the chat UI: replay history, stream a reply, fall back on error."""
    st.title("Chatbot with Model Switching and Streaming")

    # Initialize conversation history in session state.
    if "messages" not in st.session_state:
        st.session_state.messages = []

    # Replay previous messages so the transcript survives Streamlit reruns.
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    prompt = st.chat_input("Say something")
    if not prompt:
        return

    # Snapshot prior turns BEFORE appending the new prompt, so the model
    # receives the conversation context plus the prompt exactly once.
    history = list(st.session_state.messages)

    # Add user message to the state and echo it.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Model priority: the first model that streams successfully wins.
    models = ["gpt-4-1106-preview", "gpt-3.5-turbo"]  # Add more models as needed

    api_key = get_api_key()
    if not api_key:
        return

    # The original source had a raw newline inside this literal (a syntax
    # error); it is now a single well-formed string.
    extra = "Please address the user as Jane Doe. The user has a premium account."

    full_response = ""
    for model in models:
        stream = generate_response(
            prompt, model, api_key, extra_instructions=extra, history=history
        )
        if stream:
            with st.chat_message("assistant"):
                message_placeholder = st.empty()
                for chunk in stream:
                    # Some chunks carry no delta content (e.g. role-only);
                    # guard before appending.
                    if (
                        chunk.choices
                        and chunk.choices[0].delta
                        and chunk.choices[0].delta.content
                    ):
                        full_response += chunk.choices[0].delta.content
                        message_placeholder.markdown(full_response + "▌")
                message_placeholder.markdown(full_response)
            break  # Break after successful response.
        full_response = ""  # Reset for the next model attempt.

    if full_response:
        # Add bot message to state.
        st.session_state.messages.append(
            {"role": "assistant", "content": full_response}
        )


if __name__ == "__main__":
    main()