import gradio as gr
from huggingface_hub import hf_hub_download
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
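
# Pick the GPU if one is available and load the base VietAI/vit5-base tokenizer and model.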
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = AutoTokenizer.from_pretrained("VietAI/vit5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("VietAI/vit5-base")
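
# Download the fine-tuned medical chatbot checkpoint from the Hub and load its weights.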
model_file = hf_hub_download(repo_id="tuongvxx1/medBot", filename="medicalBot_ver4_2.pth")
state_dict = torch.load(model_file, map_location=torch.device('cpu'))
model.load_state_dict(state_dict)
model.to(device)

def generate_answer(question, model, tokenizer, device):
    model.eval()
    # Prepend the "hỏi: " ("question: ") prompt prefix before encoding.
    input_text = "hỏi: " + question
    inputs = tokenizer(input_text, return_tensors="pt", max_length=512, truncation=True, padding="max_length")
    input_ids = inputs.input_ids.to(device)
    attention_mask = inputs.attention_mask.to(device)
    # Beam-search generation with gradients disabled for inference.
    with torch.no_grad():
        outputs = model.generate(input_ids=input_ids, attention_mask=attention_mask, max_length=128, num_beams=4, early_stopping=True)
    answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return answer

def run(ques):
    return generate_answer(ques, model, tokenizer, device)

# Gradio UI: input textbox "Nhập câu hỏi" ("Enter a question"), output textbox "Câu trả lời" ("Answer").
demo = gr.Interface(fn=run, inputs=gr.Textbox(label="Nhập câu hỏi"), outputs=gr.Textbox(label="Câu trả lời"))
demo.launch()