import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
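
# Generate a continuation of a fixed Japanese prompt with
# cyberagent/open-calm-small and display the result in a Gradio text interface.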
def run_llm():
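    # Load OpenCALM-small in fp16; device_map="auto" (which requires the
    # accelerate package) places the weights on a GPU when one is available.
    # Note: the model is reloaded on every call; a long-running app would
    # normally load it once at module level.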
    model = AutoModelForCausalLM.from_pretrained(
        "cyberagent/open-calm-small", device_map="auto", torch_dtype=torch.float16
    )
    tokenizer = AutoTokenizer.from_pretrained("cyberagent/open-calm-small")
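    # Tokenize the fixed Japanese prompt ("AIによって私達の暮らしは、", roughly
    # "Thanks to AI, our lives are ...") and move the tensors to the model's device.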
    inputs = tokenizer("AIによって私達の暮らしは、", return_tensors="pt").to(model.device)
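    # Generation is inference-only, so disable gradient tracking to save memory.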
    with torch.no_grad():
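        # Sample up to 640 new tokens with temperature/top-p (nucleus) sampling
        # and a mild repetition penalty.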
        tokens = model.generate(
            **inputs,
            max_new_tokens=640,
            do_sample=True,
            temperature=0.7,
            top_p=0.9,
            repetition_penalty=1.05,
            pad_token_id=tokenizer.pad_token_id,
        )
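    # Decode the full sequence (prompt plus continuation), dropping special tokens.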
    output = tokenizer.decode(tokens[0], skip_special_tokens=True)
    return output


def display_message():
    msg = run_llm()
    return msg
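
# No input components: each run of the interface regenerates text from the fixed prompt.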
iface = gr.Interface(fn=display_message, inputs=None, outputs="text")
iface.launch()
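
# A possible variant (a sketch, not part of the original script): load the model
# and tokenizer once at module level and take the prompt as a Gradio textbox
# input, so each request only pays for generation rather than reloading the
# weights. The helper name generate_from is hypothetical.
#
#   def generate_from(prompt: str) -> str:
#       inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
#       with torch.no_grad():
#           tokens = model.generate(**inputs, max_new_tokens=640, do_sample=True,
#                                   temperature=0.7, top_p=0.9,
#                                   repetition_penalty=1.05,
#                                   pad_token_id=tokenizer.pad_token_id)
#       return tokenizer.decode(tokens[0], skip_special_tokens=True)
#
#   iface = gr.Interface(fn=generate_from, inputs="text", outputs="text")
#   iface.launch()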