Unggi's picture
test
ce5b5d6
raw
history blame
3.55 kB
import os
import gradio as gr
from dotenv import load_dotenv
from openai import OpenAI
from prompts.initial_prompt import INITIAL_PROMPT
from prompts.main_prompt import MAIN_PROMPT
# .env ํŒŒ์ผ์—์„œ OPENAI_API_KEY ๋กœ๋“œ
if os.path.exists(".env"):
load_dotenv(".env")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
client = OpenAI(api_key=OPENAI_API_KEY)
def gpt_call(history, user_message,
             model="gpt-4o-mini",
             max_tokens=512,
             temperature=0.7,
             top_p=0.95):
    """Generate an assistant reply via the OpenAI Chat Completions API.

    Args:
        history: prior turns as a list of (user_text, assistant_text) tuples.
        user_message: the message the user just submitted.
        model, max_tokens, temperature, top_p: generation settings
            forwarded unchanged to the API.

    Returns:
        The assistant's reply text for this turn.
    """
    # The system prompt (MAIN_PROMPT) always leads the message list.
    messages = [{"role": "system", "content": MAIN_PROMPT}]

    # Replay the recorded turns in OpenAI's role/content format. Either
    # side of a turn may be empty (e.g. the seeded greeting has no user
    # text), so each side is appended only when non-empty.
    for prev_user, prev_assistant in history:
        if prev_user:
            messages.append({"role": "user", "content": prev_user})
        if prev_assistant:
            messages.append({"role": "assistant", "content": prev_assistant})

    # The current user input is the final message.
    messages.append({"role": "user", "content": user_message})

    response = client.chat.completions.create(
        model=model,
        messages=messages,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
    )
    return response.choices[0].message.content
def respond(user_message, history):
    """Gradio submit handler for the chat input.

    Args:
        user_message: the text the user just submitted.
        history: the running list of (user, assistant) tuples.

    Returns:
        A ("", history) pair: an empty string to clear the input textbox,
        and the updated history for the chatbot display.
    """
    # Skip empty AND whitespace-only submissions so a stray
    # space/newline doesn't trigger a pointless (and paid) API call.
    if not user_message or not user_message.strip():
        return "", history

    # Ask the model for a reply given the conversation so far.
    assistant_reply = gpt_call(history, user_message)

    # Record the new turn in place; Gradio reads the same list back.
    history.append((user_message, assistant_reply))

    # (new textbox value, updated chat history)
    return "", history
##############################
# Gradio Blocks UI
##############################
with gr.Blocks() as demo:
    gr.Markdown("## Simple Chat Interface")

    # Seed the chatbot with (user="", assistant=INITIAL_PROMPT) so the
    # opening message appears to be spoken by the assistant.
    chatbot = gr.Chatbot(
        value=[("", INITIAL_PROMPT)],  # (user, assistant)
        height=500
    )

    # Server-side state holding the (user, assistant) tuple history;
    # seeded identically so it starts in sync with the chatbot display.
    state_history = gr.State([("", INITIAL_PROMPT)])

    # Free-text user input.
    user_input = gr.Textbox(
        placeholder="Type your message here...",
        label="Your Input"
    )

    # On submit, respond() returns (cleared textbox, updated history),
    # which are written to user_input and chatbot respectively.
    user_input.submit(
        respond,
        inputs=[user_input, state_history],
        outputs=[user_input, chatbot]
    ).then(
        # After respond() finishes, mirror the chatbot's latest value
        # back into state_history so the next turn sees the full log.
        fn=lambda _, h: h,
        inputs=[user_input, chatbot],
        outputs=[state_history]
    )

# Script entry point: 0.0.0.0:7860 is the standard HF Spaces binding.
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860, share=True)