import gradio as gr
import torch
import random
import time
from transformers import pipeline
# model_name = '../checkpoint/koalpaca/ajoublue-gpt2-medium/epoch-4-last/'
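# Load a text-generation pipeline for the gorani-v0 model, using the first GPU when available.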
generator = pipeline(
    'text-generation',
    model="heegyu/gorani-v0",
    device="cuda:0" if torch.cuda.is_available() else 'cpu'
)
def query(message, chat_history, max_turn=2):
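    """Build a <usr>/<bot> prompt from the last `max_turn` exchanges plus the new
    message, generate a continuation, and return only the model's new reply."""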
    # prompt = [
    #     "<usr> You are the Korean chatbot Gorani. Answer my questions and follow my instructions.",
    #     "<bot> Yes, I am the Korean chatbot Gorani. Please ask me anything."
    # ]
    prompt = []
    if len(chat_history) > max_turn:
        chat_history = chat_history[-max_turn:]
    for i, (user, bot) in enumerate(chat_history):
        # if i == 0:
        #     prompt.append(f"<usr> Nice to meet you. You are a Korean chatbot named Gorani. {user}")
        # else:
        prompt.append(f"<usr> {user}")
        prompt.append(f"<bot> {bot}")
    prompt.append(f"<usr> {message}")
    prompt = "\n".join(prompt) + "\n<bot>"
    output = generator(
        prompt,
        do_sample=True,
        top_p=0.9,
        early_stopping=True,
        max_new_tokens=256,
    )[0]['generated_text']
    print(output)
    response = output[len(prompt):]
    return response.strip()
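
# Gradio Blocks UI: a chat window, a message textbox, and a button to clear the conversation.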
with gr.Blocks() as demo:
    chatbot = gr.Chatbot().style(height=700)
    msg = gr.Textbox()
    clear = gr.Button("Clear")
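
    # Callback for submitted messages: query the model, append the exchange, and clear the textbox.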
    def respond(message, chat_history):
        bot_message = query(message, chat_history)  # random.choice(["How are you?", "I love you", "I'm very hungry"])
        chat_history.append((message, bot_message))
        # time.sleep(1)
        return "", chat_history
    msg.submit(respond, [msg, chatbot], [msg, chatbot])
    clear.click(lambda: None, None, chatbot, queue=False)
demo.launch()