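"""Streamlit chatbot app with model fallback and streamed responses.

The app keeps the conversation history in st.session_state, sends the full
history to the OpenAI Chat Completions API, and falls back to the next model
in the priority list if a request fails. Launch it with Streamlit's CLI,
e.g. `streamlit run app.py` (the file name here is just an example).
"""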
import streamlit as st
import openai

# Function to get the API key from Streamlit secrets
def get_api_key():
    try:
        return st.secrets["API_KEY"]
    except KeyError:
        st.error("API_KEY not found in Streamlit secrets. Please add it.")
        return None
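# Note: for local development the secret typically lives in .streamlit/secrets.toml,
# e.g. a single line such as:  API_KEY = "sk-..."
# On Streamlit Community Cloud it is set in the app's Secrets settings instead.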

# Function to interact with the OpenAI API with streaming
def generate_response(messages, model_name, api_key):  # 'messages' is the full conversation history
    try:
        client = openai.OpenAI(api_key=api_key)  # Instantiate OpenAI client with api_key

        stream = client.chat.completions.create(
            model=model_name,
            messages=messages, # Use the entire conversation history
            stream=True,
        )
        return stream
    except openai.AuthenticationError as e:
        st.error(f"OpenAI Authentication Error with {model_name}: {e}")
        return None
    except openai.RateLimitError as e:
        st.error(f"OpenAI Rate Limit Error with {model_name}: {e}")
        return None
    except openai.APIError as e:
        # APIError is the base class, so it must be caught after the more specific errors above.
        st.error(f"OpenAI API Error with {model_name}: {e}")
        return None
    except Exception as e:
        st.error(f"An unexpected error occurred with {model_name}: {e}")
        return None

# Main Streamlit app
def main():
    st.title("Chatbot with Model Switching and Streaming")

    # Initialize conversation history in session state
    if "messages" not in st.session_state:
        st.session_state.messages = []

    # Display previous messages
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    # Get user input
    prompt = st.chat_input("Say something")

    if prompt:
        # Add user message to the state
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.markdown(prompt)

        # Define model priority
        models = ["gpt-4o-mini", "gpt-3.5-turbo"]  # Add more models as needed

        # Get API key
        api_key = get_api_key()
        if not api_key:
            return

        full_response = ""
        # The session history is already a list of {"role": ..., "content": ...}
        # dicts, which is the format the Chat Completions API expects.
        openai_messages = st.session_state.messages
        for model in models:
            stream = generate_response(openai_messages, model, api_key) # Pass the messages
            if stream:
                with st.chat_message("assistant"):
                    message_placeholder = st.empty()
                    for chunk in stream:
                        if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
                            full_response += chunk.choices[0].delta.content
                            message_placeholder.markdown(full_response + "▌")
                    message_placeholder.markdown(full_response)
                break  # Stop at the first model that responds successfully
            full_response = ""  # Reset before trying the next model

        if full_response:
            # Add bot message to state
            st.session_state.messages.append({"role": "assistant", "content": full_response})

if __name__ == "__main__":
    main()