# medBot4 / app.py
import gradio as gr
import torch
from collections import OrderedDict
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
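# Load the base VietAI/vit5-base tokenizer and model; the fine-tuned medBot
# weights are applied on top of this base model below.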
tokenizer = AutoTokenizer.from_pretrained("VietAI/vit5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("VietAI/vit5-base")
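# Download the fine-tuned medBot checkpoint from the Hub and load it on CPU;
# the model is moved to the target device after the weights are applied.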
model_file = hf_hub_download(repo_id="tuongvxx1/medBot", filename="medicalBot_ver4.pth")
state_dict = torch.load(model_file, map_location=torch.device('cpu'))
# Strip any 'module.' prefix (left by DataParallel-style saving) so the
# checkpoint keys match the plain, unwrapped model.
new_state_dict = OrderedDict()
for k, v in state_dict.items():
    name = k.replace('module.', '')
    new_state_dict[name] = v
model.load_state_dict(new_state_dict)
model.to(device)

def generate_answer(question, model, tokenizer, device):
    model.eval()
    # Prepend the Vietnamese prompt prefix "hỏi: " ("question: ").
    input_text = "hỏi: " + question
    inputs = tokenizer(input_text, return_tensors="pt", max_length=512, truncation=True, padding="max_length")
    input_ids = inputs.input_ids.to(device)
    attention_mask = inputs.attention_mask.to(device)
    # Beam-search decoding with no gradient tracking.
    with torch.no_grad():
        outputs = model.generate(input_ids=input_ids, attention_mask=attention_mask, max_length=128, num_beams=4, early_stopping=True)
    answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return answer

def run(ques):
    return generate_answer(ques, model, tokenizer, device)
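
# Gradio UI. Label translations: "Nhập câu hỏi" = "Enter a question",
# "Câu trả lời" = "Answer".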
demo = gr.Interface(fn=run, inputs=gr.Textbox(label="Nhập câu hỏi"), outputs=gr.Textbox(label="Câu trả lời"))
demo.launch()