"""Streamlit chat UI for the Meta-Llama-3.1-405B-Instruct model via an
OpenAI-compatible API endpoint.

Reads OPENAI_API_KEY and OPENAI_API_BASE from Streamlit secrets, keeps the
conversation in st.session_state.chat_history, and renders a standard
chat-message interface.
"""

import streamlit as st
import openai

# st.set_page_config must be the FIRST Streamlit command executed in the
# script (Streamlit raises otherwise), so it comes before any st.error/st.stop.
st.set_page_config(
    page_title="LLama-3.1-405B Chatbot",
    page_icon="💬",
    layout="centered",
)

# Pull API credentials from Streamlit secrets; both are required.
OPENAI_API_KEY = st.secrets.get("OPENAI_API_KEY")
OPENAI_API_BASE = st.secrets.get("OPENAI_API_BASE")

if not OPENAI_API_KEY or not OPENAI_API_BASE:
    st.error("API key or base URL is missing in Streamlit secrets.")
    st.stop()

# Configure the OpenAI client (module-level configuration, openai>=1.x style).
openai.api_key = OPENAI_API_KEY
openai.base_url = OPENAI_API_BASE

# Initialize chat history in the session if not already present.
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []

# Page title
st.title("🤖 Llama 3.1 405B Chatbot")

# LLM model identifier sent with every completion request.
model = "meta-llama/Meta-Llama-3.1-405B-Instruct"

# Replay the conversation so far.
for message in st.session_state.chat_history:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Input field for the user's message.
user_prompt = st.chat_input("Ask the LLM...")

if user_prompt:
    # Show the user's message and record it in the history.
    st.chat_message("user").markdown(user_prompt)
    st.session_state.chat_history.append({"role": "user", "content": user_prompt})

    # Query the model; only append the assistant turn on success so a failed
    # request doesn't leave a half-finished exchange in the history.
    try:
        response = openai.chat.completions.create(
            model=model,
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                *st.session_state.chat_history,
            ],
        )
    except Exception as exc:  # surface API/transport failures in the UI
        st.error(f"Request to the model failed: {exc}")
        st.stop()

    assistant_response = response.choices[0].message.content
    st.session_state.chat_history.append(
        {"role": "assistant", "content": assistant_response}
    )

    # Display the assistant's response.
    with st.chat_message("assistant"):
        st.markdown(assistant_response)