# Generic imports
import os
import keyfile
import warnings
import streamlit as st
warnings.filterwarnings("ignore")
# Langchain packages
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.schema import HumanMessage, SystemMessage, AIMessage
# Page setup and the first message shown on screen
st.set_page_config(page_title="Magical Healer")
st.header("Welcome, what help do you need?")
# Initialize the conversation history with the general system instruction
if "sessionMessages" not in st.session_state:
    st.session_state.sessionMessages = [
        SystemMessage(content="You are a medieval magical healer known for your peculiar sarcasm")
    ]
# Configuring the key
os.environ["GOOGLE_API_KEY"] = keyfile.GOOGLEKEY
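# Note: 'keyfile' is assumed to be a small local module (keyfile.py) kept out of
# version control that simply defines the key, e.g.:
#   GOOGLEKEY = "your-google-api-key"
# Adapt this to your own secrets handling (e.g. st.secrets or an environment variable).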
# Create a model
llm = ChatGoogleGenerativeAI(
    model="gemini-1.5-pro",
    temperature=0.7,
    # Gemini (via this integration) has no separate system role,
    # so the SystemMessage is folded into the first human message
    convert_system_message_to_human=True,
)
# Response function
def load_answer(question):
    st.session_state.sessionMessages.append(HumanMessage(content=question))
    assistant_response = llm.invoke(st.session_state.sessionMessages)
    # llm.invoke returns a message object whose 'content' attribute holds the reply text
    if hasattr(assistant_response, "content") and isinstance(assistant_response.content, str):
        processed_content = assistant_response.content
        st.session_state.sessionMessages.append(AIMessage(content=processed_content))
    else:
        st.error("Invalid response received from AI.")
        processed_content = "Sorry, I couldn't process your request."
    return processed_content
# User message
def get_text():
    # 'key' must be a string identifier, not the built-in input function
    input_text = st.text_input("You: ", key="input")
    return input_text
# Implementation
user_input = get_text()
submit = st.button("Generate")
if submit:
    resp = load_answer(user_input)
    st.subheader("Answer: ")
    st.write(resp)
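# To run locally (assuming this file is saved as app.py next to keyfile.py):
#   pip install streamlit langchain langchain-google-genai
#   streamlit run app.py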