# Provenance (Hugging Face file viewer): alexrods — "Create inference.py",
# commit d1aaae4, 284 bytes. The viewer chrome ("raw / history / blame")
# was captured during extraction; kept here as a comment so the file parses.
"""Inference helpers: load the trained text classifier, vocab, and tokenizer.

Importing this module has side effects: it reads 'model_checkpoint.pth' and
'vocab.pt' from the current working directory, so both files must be present.
"""
import torch
from torchtext.data.utils import get_tokenizer
# Imported so torch.load can unpickle the saved model object, which references
# this class even though the name is not used directly below.
from model_arch import TextClassifierModel

# map_location='cpu' lets a checkpoint saved on GPU load on CPU-only machines;
# without it, torch.load raises on hosts with no CUDA device.
# NOTE(review): torch.load unpickles arbitrary objects — only load trusted files.
model_loaded = torch.load('model_checkpoint.pth', map_location=torch.device('cpu'))
# Put the model in inference mode (disables dropout / batch-norm updates).
# Guarded because the checkpoint might be a state_dict rather than a module.
if isinstance(model_loaded, torch.nn.Module):
    model_loaded.eval()

vocab = torch.load('vocab.pt', map_location=torch.device('cpu'))
tokenizer = get_tokenizer("spacy", language="es")  # Spanish spaCy tokenizer


def text_pipeline(x):
    """Tokenize raw text *x* and map the tokens to vocabulary ids."""
    return vocab(tokenizer(x))