# gorani-v0 / app.py
# commit bb7428a: "it's better to remove the prompt" (hard-coded system prompt dropped)
import gradio as gr
import torch
import random
import time
from transformers import pipeline
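# NOTE: random and time are only referenced by the commented-out demo snippets below.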
# model_name = '../checkpoint/koalpaca/ajoublue-gpt2-medium/epoch-4-last/'
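# Load heegyu/gorani-v0 (a Korean chatbot model) as a text-generation pipeline,
# on GPU if one is available, otherwise on CPU.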
generator = pipeline(
    "text-generation",
    model="heegyu/gorani-v0",
    device="cuda:0" if torch.cuda.is_available() else "cpu",
)
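
# Build a "<usr> ... / <bot> ..." prompt from the most recent turns of chat history,
# generate a continuation with the pipeline, and return only the bot's new reply.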
def query(message, chat_history, max_turn=2):
    # A fixed system prompt introducing the bot as the Korean chatbot "Gorani"
    # was removed (see the commit message at the top of the file).
    # prompt = [
    #     "<usr> λ„Œ ν•œκ΅­μ–΄ 챗봇 κ³ λΌλ‹ˆμ•Ό. λ„ˆλŠ” λ‚΄κ°€ λ¬»λŠ” μ§ˆλ¬Έμ— λ‹΅ν•˜κ³  μ§€μ‹œμ‚¬ν•­μ— λ§žλŠ” λŒ€λ‹΅μ„ ν•΄μ•Όν•΄.",
    #     "<bot> λ„€, μ €λŠ” ν•œκ΅­μ–΄ 챗봇 κ³ λΌλ‹ˆμž…λ‹ˆλ‹€. κΆκΈˆν•œ 것을 λ¬Όμ–΄λ³΄μ„Έμš”. "
    # ]
    prompt = []

    # Keep only the last max_turn exchanges so the prompt stays short.
    if len(chat_history) > max_turn:
        chat_history = chat_history[-max_turn:]

    for i, (user, bot) in enumerate(chat_history):
        # if i == 0:
        #     prompt.append(f"<usr> λ°˜κ°€μ›Œ λ„ˆλŠ” ν•œκ΅­μ–΄ 챗봇이고 이름은 κ³ λΌλ‹ˆμ•Ό. {user}")
        # else:
        prompt.append(f"<usr> {user}")
        prompt.append(f"<bot> {bot}")

    prompt.append(f"<usr> {message}")
    prompt = "\n".join(prompt) + "\n<bot>"

    output = generator(
        prompt,
        do_sample=True,
        top_p=0.9,
        early_stopping=True,
        max_new_tokens=256,
    )[0]["generated_text"]
    print(output)

    # The pipeline echoes the prompt; strip it off so only the generated reply remains.
    response = output[len(prompt):]
    return response.strip()
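
# Minimal Gradio chat UI: a Chatbot pane, a text box for user input, and a Clear button.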
with gr.Blocks() as demo:
    # Chatbot().style(...) is the Gradio 3.x API; newer Gradio versions take
    # the height as a constructor argument instead.
    chatbot = gr.Chatbot().style(height=700)
    msg = gr.Textbox()
    clear = gr.Button("Clear")

    def respond(message, chat_history):
        bot_message = query(message, chat_history)  # random.choice(["How are you?", "I love you", "I'm very hungry"])
        chat_history.append((message, bot_message))
        # time.sleep(1)
        # Clear the textbox and show the updated conversation.
        return "", chat_history

    # Submitting the textbox sends a message; the Clear button resets the chat.
    msg.submit(respond, [msg, chatbot], [msg, chatbot])
    clear.click(lambda: None, None, chatbot, queue=False)

demo.launch()