# Source: HuggingFace — alexrods, "Update inference.py" (commit 7064c13, 569 bytes)
import torch
from torchtext.data.utils import get_tokenizer
from model_arch import TextClassifierModel, load_state_dict
# --- Load trained artifacts -------------------------------------------------
# NOTE(review): torch.load unpickles arbitrary objects — only load checkpoints
# from trusted sources (newer torch supports weights_only=True to mitigate).
model_trained = torch.load('model_checkpoint.pth')
vocab = torch.load('vocab.pt')

# Spanish spaCy tokenizer; text_pipeline maps raw text -> list of token ids.
tokenizer = get_tokenizer("spacy", language="es")
text_pipeline = lambda x: vocab(tokenizer(x))

# Hyperparameters — presumably these must match the values used at training
# time for the checkpoint to load correctly; verify against the training script.
num_class = 11
vocab_size = len(vocab)
embed_size = 300
lr = 0.4

model = TextClassifierModel(vocab_size, embed_size, num_class)
# BUG FIX: original referenced the undefined name `model_test` (NameError at
# runtime) and hard-coded the learning rate; use `model` and the `lr` constant.
optimizer = torch.optim.SGD(model.parameters(), lr=lr)

# Restore the trained weights / optimizer state into the freshly built model.
model, optimizer = load_state_dict(model, optimizer, model_trained, vocab)