# opt350m-test / app.py
# ๋ชจ๋ธ ๋กœ๋”ฉ
import torch
from peft import PeftConfig, PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer
device = "cuda" if torch.cuda.is_available() else "cpu"
base_model_name = "facebook/opt-350m"
adapter_model_name = 'msy127/opt-350m-aihubqa-130-dpo-adapter'
model = AutoModelForCausalLM.from_pretrained(base_model_name)
model = PeftModel.from_pretrained(model, adapter_model_name).to(device)
tokenizer = AutoTokenizer.from_pretrained(adapter_model_name)
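# Not in the original file: putting the model in eval mode makes the inference
# intent explicit (dropout disabled). If the adapter is LoRA-based, peft's
# merge_and_unload() could also fold the adapter weights into the base model
# for faster decoding; it is left commented out here as a hedged option.
model.eval()
# model = model.merge_and_unload()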
# Conversation-accumulating function: the running history is passed in alongside the new input.
# DialoGPT required encoding the history before feeding it to the model, whereas the OpenAI API
# takes the raw messages without manual encoding.
def predict(user_input, history):
    history.append({"role": "user", "content": user_input})
    # Plain base model (not a chat model), so the question is wrapped in an instruction-style prompt
    prompt = f"An AI tool that looks at the context and question separated by triple backquotes, finds the answer corresponding to the question in the context, and answers clearly.\n### Input: ```{user_input}```\n ### Output: "
    inputs = tokenizer.encode(prompt, return_tensors="pt").to(device)
    outputs = model.generate(input_ids=inputs, max_length=256)
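    # A hedged aside (not in the original code): generate() above decodes
    # greedily by default. Standard transformers sampling options such as
    # these could give more varied answers:
    # outputs = model.generate(input_ids=inputs, max_length=256,
    #                          do_sample=True, top_p=0.9, temperature=0.7)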
    generated_text = tokenizer.decode(outputs[0])
    # The decoded text starts with the "</s>" BOS token followed by the prompt,
    # so newly generated text begins right after both.
    start_idx = len(prompt) + len('</s>')
    stop_first_idx = generated_text.find("### Input:")  # the first "### Input:" belongs to the prompt itself
    stop_idx = generated_text.find("### Input:", stop_first_idx + 1)  # look for a second "### Input:" that the model may have generated
    if stop_idx != -1:
        response = generated_text[start_idx:stop_idx]  # keep only the newly generated text, up to the repeated "### Input:"
    else:
        response = generated_text[start_idx:]  # no repeated marker, so keep everything after the prompt
    # Accumulate the assistant turn
    history.append({"role": "assistant", "content": response})
    # Pair up (user, assistant) turns for gr.Chatbot, skipping the system message at index 0
    messages = [(history[i]["content"], history[i + 1]["content"]) for i in range(1, len(history), 2)]
    return messages, history
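# A minimal smoke-test sketch (an assumption, not part of the original app):
# calling predict() directly before wiring it into Gradio. The question text
# here is purely illustrative.
# _history = [{"role": "system", "content": "You are a friendly AI chatbot."}]
# _messages, _history = predict("What does this adapter answer about?", _history)
# print(_messages[-1][1])  # the assistant's latest reply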
# Gradio interface setup
import gradio as gr
with gr.Blocks() as demo:
    chatbot = gr.Chatbot(label="ChatBot")
    state = gr.State([
        {"role": "system", "content": "You are a friendly AI chatbot. Please answer each input briefly, concisely, and kindly."}])
    with gr.Row():
        txt = gr.Textbox(show_label=False, placeholder="Ask the chatbot anything").style(container=False)
    txt.submit(predict, [txt, state], [chatbot, state])
# demo.launch(debug=True, share=True)
demo.launch()
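# Hedged note (not in the original): with multiple concurrent users, Gradio's
# built-in request queue serializes access to the single model worker; in
# Gradio 3.x this would be:
# demo.queue().launch()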