Update app.py
Browse files
app.py
CHANGED
@@ -7,7 +7,14 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
|
7 |
# --- Model setup (runs at import time) ---------------------------------
# Load the base Vietnamese T5 tokenizer and seq2seq model, then fetch the
# fine-tuned medical-chatbot checkpoint from the Hugging Face Hub.
tokenizer = AutoTokenizer.from_pretrained("VietAI/vit5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("VietAI/vit5-base")

# hf_hub_download returns the local cache path of the downloaded file.
model_file = hf_hub_download(repo_id="tuongvxx1/medBot", filename="medicalBot_ver4.pth")

# Move the model onto the selected device (CUDA if available, else CPU —
# `device` is defined earlier in the file).
model.to(device)
|
12 |
|
13 |
def generate_answer(question, model, tokenizer, device):
|
|
|
7 |
# --- Model setup (runs at import time) ---------------------------------
# Load the base Vietnamese T5 tokenizer/model, then overlay the fine-tuned
# medical-chatbot weights downloaded from the Hugging Face Hub.
tokenizer = AutoTokenizer.from_pretrained("VietAI/vit5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("VietAI/vit5-base")

# hf_hub_download returns the LOCAL CACHE PATH of the checkpoint file.
model_file = hf_hub_download(repo_id="tuongvxx1/medBot", filename="medicalBot_ver4.pth")

# BUG FIX: load from the downloaded path (`model_file`), not the hard-coded
# relative filename 'medicalBot_ver4.pth' — the file lives in the HF cache,
# not necessarily in the current working directory.
state_dict = torch.load(model_file, map_location=torch.device('cpu'))

# Checkpoints saved from an nn.DataParallel-wrapped model prefix every key
# with 'module.'; strip that prefix so the keys match the plain model.
# NOTE(review): `replace` removes the substring anywhere in the key, not
# just as a prefix — presumably fine for this checkpoint's key names.
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in state_dict.items():
    name = k.replace('module.', '')
    new_state_dict[name] = v
model.load_state_dict(new_state_dict)

# Move the model onto the selected device (CUDA if available, else CPU —
# `device` is defined earlier in the file).
model.to(device)
|
19 |
|
20 |
def generate_answer(question, model, tokenizer, device):
|