"""Streamlit medical chatbot backed by Google Gemini.

Loads the API key from a .env file, keeps the conversation in Streamlit
session state, and feeds prior turns back into each prompt so the model
has conversational context.
"""
from dotenv import load_dotenv

load_dotenv()  # must run before os.getenv("GOOGLE_KEY") below

import os

import google.generativeai as genai
import streamlit as st

# Configure the Gemini client and open a persistent chat session.
genai.configure(api_key=os.getenv("GOOGLE_KEY"))
model = genai.GenerativeModel("gemini-pro")
chat = model.start_chat(history=[])


def get_gemini_response(prompt):
    """Send *prompt* to the Gemini chat session.

    Returns the streaming response iterable on success, or an error
    message string if the API call raises (callers distinguish the two
    with isinstance(response, str)).
    """
    try:
        return chat.send_message(prompt, stream=True)
    except Exception as e:  # broad on purpose: surface any API failure in the UI
        return f"An error occurred: {str(e)}"


# Streamlit app configuration
st.set_page_config(page_title="Med ChatBot")
# BUG FIX: the original did `st.header = ("ChatBot App")`, assigning to the
# attribute instead of calling the function — the header was never rendered
# and st.header was clobbered for the rest of the session.
st.header("ChatBot App")

# Initialize session state for chat history
if "chat_history" not in st.session_state:
    st.session_state["chat_history"] = []

# Input and submission button
input_text = st.text_input("Input: ", key="input")
submit = st.button("Ask the question")

if submit and input_text:
    # Build the LLM context, including the prior conversation.
    chat_history_text = " ".join(
        f"{role}: {text}" for role, text in st.session_state["chat_history"]
    )
    context = (
        "You are a medical chatbot designed to assist users in understanding their symptoms. "
        "Provide clear, concise, and informative responses based on NHS guidelines. "
        "Avoid technical jargon and code snippets. If asked a question unrelated to medical topics, "
        "respond with: 'I am a medical bot and I don't have that knowledge.' "
        f"Previous conversation: {chat_history_text} "
    )
    prompt = f"{context} User's latest input: {input_text}"  # Include the latest user input

    response = get_gemini_response(prompt)

    # Add user query to session state chat history
    st.session_state["chat_history"].append(("You", input_text))

    st.subheader("The Response is")
    if isinstance(response, str):  # error path: get_gemini_response returned a message
        st.write(response)
        st.session_state["chat_history"].append(("Bot", response))
    else:
        # BUG FIX: the original appended one ("Bot", chunk.text) record per
        # streamed chunk, fragmenting the history that is fed back into the
        # next prompt. Stream chunks to the UI, but store one joined entry.
        parts = []
        for chunk in response:
            st.write(chunk.text)
            parts.append(chunk.text)
        st.session_state["chat_history"].append(("Bot", "".join(parts)))

st.subheader("The Chat History is")
for role, text in st.session_state["chat_history"]:
    st.write(f"{role}: {text}")