Spaces:
Sleeping
Sleeping
File size: 2,050 Bytes
782ba28 9e4f83d 782ba28 9e4f83d |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 |
import streamlit as st
from langchain_core.messages.chat import ChatMessage
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from langchain_core.output_parsers import StrOutputParser
from langchain_teddynote.prompts import load_prompt
from dotenv import load_dotenv
from langchain import hub
load_dotenv()

st.title("๋๋ง์ ์ฑGPT๐ฌ")

# Initialize the chat history exactly once per browser session.
if "messages" not in st.session_state:
    st.session_state["messages"] = []

# Sidebar: a reset button and a prompt-mode selector.
with st.sidebar:
    clear_btn = st.button("๋ํ ์ด๊ธฐํ")
    # BUGFIX: the original passed ("...") — parentheses without a trailing
    # comma are NOT a tuple, so selectbox iterated the string character by
    # character. A one-element tuple restores the intended single option.
    selected_prompt = st.selectbox("ํ๋กฌํํธ๋ฅผ ์ ํํด ์ฃผ์ธ์", ("๊ธฐ๋ณธ๋ชจ๋",), index=0)
# ์ด์ ๋ํ๋ฅผ ์ถ๋ ฅ
def print_messages():
    """Replay every stored chat turn, in insertion order, into the UI."""
    history = st.session_state["messages"]
    for msg in history:
        st.chat_message(msg.role).write(msg.content)
# ์๋ก์ด ๋ฉ์์ง๋ฅผ ์ถ๊ฐ
def add_message(role, message):
    """Append one (role, content) entry to the session chat history."""
    entry = ChatMessage(role=role, content=message)
    st.session_state["messages"].append(entry)
# ์ฒด์ธ ์์ฑ
def create_chain(prompt_type):
    """Build the LCEL pipeline: prompt -> ChatOpenAI -> string output.

    BUGFIX: the system message literal was split across two physical lines,
    which is a syntax error for a plain (non-triple-quoted) Python string;
    it is rejoined into a single literal here with its bytes otherwise
    unchanged.

    NOTE(review): `prompt_type` is accepted (the caller passes the sidebar
    selection) but never used — presumably a hook for `load_prompt`-based
    modes; confirm before removing.
    """
    prompt = ChatPromptTemplate.from_messages(
        [
            (
                "system",
                "๋น์ ์ ์น์ ํ AI ์ด์์คํดํธ์๋๋ค. ๋ค์์ ์ง๋ฌธ์ ๊ฐ๊ฒฐํ๊ฒ ๋ต๋ณํด ์ฃผ์ธ์.",
            ),
            ("user", "#Question:\n{question}"),
        ]
    )
    # temperature=0 for deterministic, focused answers.
    llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
    return prompt | llm | StrOutputParser()
# Sidebar reset: wipe the stored history before rendering.
if clear_btn:
    st.session_state["messages"] = []

# Re-render all previous turns on every script rerun.
print_messages()

user_input = st.chat_input("๊ถ๊ธํ ๋ด์ฉ์ ๋ฌผ์ด๋ณด์ธ์!")
if user_input:
    # Echo the user's turn immediately.
    st.chat_message("user").write(user_input)

    chain = create_chain(selected_prompt)
    response = chain.stream({"question": user_input})

    # Stream the assistant's answer token by token into one placeholder.
    with st.chat_message("assistant"):
        container = st.empty()
        ai_answer = ""
        for token in response:
            ai_answer += token
            container.markdown(ai_answer)

    # Persist both turns so they survive the next rerun.
    add_message("user", user_input)
    add_message("assistant", ai_answer)
|