# Generics
import os
import warnings

import streamlit as st

import keyfile

warnings.filterwarnings("ignore")

# Langchain packages
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.schema import HumanMessage, SystemMessage, AIMessage

# First message that will pop on the screen
st.set_page_config(page_title="Magical Healer")
st.header("Welcome, what help do you need?")

# General instruction, stored once at the start of the conversation history
if "sessionMessages" not in st.session_state:
    st.session_state.sessionMessages = [
        SystemMessage(content="You are a medieval magical healer known for your peculiar sarcasm")
    ]

# Configuring the key
os.environ["GOOGLE_API_KEY"] = keyfile.GOOGLEKEY

# Create a model
llm = ChatGoogleGenerativeAI(
    model="gemini-1.5-pro",
    temperature=0.7,
    convert_system_message_to_human=True,
)

# Response function
def load_answer(question):
    # Add the new user message to the conversation history
    st.session_state.sessionMessages.append(HumanMessage(content=question))

    # Get the model's reply for the conversation so far
    assistant_answer = llm.invoke(st.session_state.sessionMessages)

    # Append the assistant's answer to the conversation history
    st.session_state.sessionMessages.append(AIMessage(content=assistant_answer.content))

    return assistant_answer.content

# User message
def get_text():
    input_text = st.text_input("You: ", key="user_input")
    return input_text

# Implementation
user_input = get_text()
submit = st.button("Generate")

if submit:
    resp = load_answer(user_input)
    st.subheader("Answer:")
    st.write(resp)
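
# ---------------------------------------------------------------------------
# Note: `keyfile` above is a small local module that only holds the API key;
# its contents are not shown in this script. A minimal sketch is given below.
# The module name `keyfile` and the attribute `GOOGLEKEY` come from the import
# above; the placeholder value is hypothetical.
#
#     # keyfile.py (assumed companion module)
#     GOOGLEKEY = "<your-gemini-api-key>"  # placeholder, replace with a real key
#
# Run the app with:  streamlit run <this_file>.py
# ---------------------------------------------------------------------------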