import itertools

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import gradio as gr
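
# Note: besides torch, transformers, and gradio, this script assumes sentencepiece
# (required by the slow tokenizer loaded with use_fast=False) and accelerate
# (required for device_map="auto") are installed.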

device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"device: {device}")

tokenizer = AutoTokenizer.from_pretrained("rinna/japanese-gpt-neox-3.6b-instruction-sft", use_fast=False)
model = AutoModelForCausalLM.from_pretrained(
    "rinna/japanese-gpt-neox-3.6b-instruction-sft",
    device_map="auto",
    torch_dtype=torch.float16
)
# device_map="auto" already places the weights, so this explicit .to() is typically redundant.
model = model.to(device)
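
# Run one generation pass: encode the prompt, sample up to max_new_tokens new tokens,
# decode only the newly generated part, and turn the model's "<NL>" separators back
# into real newlines.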
def inference_func(prompt, max_new_tokens=128, temperature=0.7):
    token_ids = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt")
    output_ids = model.generate(
        token_ids.to(model.device),
        do_sample=True,
        max_new_tokens=max_new_tokens,
        temperature=temperature,
        pad_token_id=tokenizer.pad_token_id,
        bos_token_id=tokenizer.bos_token_id,
        eos_token_id=tokenizer.eos_token_id
    )
    output = tokenizer.decode(output_ids.tolist()[0][token_ids.size(1):], skip_special_tokens=True)
    output = output.replace("<NL>", "\n")
    return output
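
# Build the conversation prompt in the format this instruction-tuned model expects:
# alternating "ユーザー:" / "システム:" turns joined by "<NL>", ending with an empty
# "システム: " turn for the model to complete. With one prior exchange the prompt
# looks like (illustrative):
#   ユーザー: <first message><NL>システム: <first reply><NL>ユーザー: <new message><NL>システム: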
def make_prompt(message, chat_history, max_context_size: int = 10):
    contexts = chat_history + [[message, ""]]
    contexts = list(itertools.chain.from_iterable(contexts))
    if max_context_size > 0:
        context_size = max_context_size - 1
    else:
        context_size = 100000
    contexts = contexts[-context_size:]
    prompt = []
    for idx, context in enumerate(reversed(contexts)):
        if idx % 2 == 0:
            prompt = [f"システム: {context}"] + prompt
        else:
            prompt = [f"ユーザー: {context}"] + prompt
    prompt = "<NL>".join(prompt)
    return prompt
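
# Gradio callback: build the prompt from the chat history, generate a reply, append
# the (message, reply) pair to the history, and clear the input textbox by returning "".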
def interact_func(message, chat_history, max_context_size, max_new_tokens, temperature):
    prompt = make_prompt(message, chat_history, max_context_size)
    print(f"prompt: {prompt}")
    generated = inference_func(prompt, max_new_tokens, temperature)
    print(f"generated: {generated}")
    chat_history.append((message, generated))
    return "", chat_history
with gr.Blocks() as demo:
    with gr.Accordion("Configs", open=False):
        # max_context_size = the number of turns * 2
        max_context_size = gr.Number(value=10, label="max_context_size", precision=0)
        max_new_tokens = gr.Number(value=128, label="max_new_tokens", precision=0)
        temperature = gr.Slider(0.0, 2.0, value=0.7, step=0.1, label="temperature")
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    clear = gr.Button("Clear")
    msg.submit(interact_func, [msg, chatbot, max_context_size, max_new_tokens, temperature], [msg, chatbot])
    clear.click(lambda: None, None, chatbot, queue=False)

if __name__ == "__main__":
    demo.launch(debug=True)