import torch
from torchtext.data.utils import get_tokenizer
# The architecture is imported so that torch.load can resolve the class when
# the checkpoint was saved as a full model object.
from model_arch import TextClassifierModel

# Restore the trained model and the vocabulary saved during training.
model_loaded = torch.load('model_checkpoint.pth')
vocab = torch.load('vocab.pt')

# Spanish spaCy tokenizer; requires the es_core_news_sm model to be installed
# (python -m spacy download es_core_news_sm).
tokenizer = get_tokenizer("spacy", language="es_core_news_sm")

# Map raw text to a list of token indices using the saved vocabulary.
text_pipeline = lambda x: vocab(tokenizer(x))
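
# Usage sketch (an illustration, not from the original file): it assumes the
# checkpoint stores a full TextClassifierModel and that the model follows the
# torchtext EmbeddingBag-style interface, i.e. forward(token_indices, offsets).
model_loaded.eval()
with torch.no_grad():
    sample = "Me encantó esta película"  # "I loved this movie"
    indices = torch.tensor(text_pipeline(sample), dtype=torch.int64)
    offsets = torch.tensor([0])  # a single example, so one offset at position 0
    predicted_class = model_loaded(indices, offsets).argmax(dim=1).item()
print(f"Predicted class: {predicted_class}")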