
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

GED_TOKENIZER = AutoTokenizer.from_pretrained("zuu/grammar-error-correcter")
GED_MODEL = AutoModelForSeq2SeqLM.from_pretrained("zuu/grammar-error-correcter")

# Incorrect text (the misspelling "contageous" is deliberate, so the model has something to fix)
incorrect_text = 'young children should avoid exposure to contageous disease'

# Tokenize text
tokens = GED_TOKENIZER([incorrect_text], padding=True, return_tensors='pt')

# Generate corrections and decode them back into plain text
corrections = GED_MODEL.generate(**tokens)
corrections = GED_TOKENIZER.batch_decode(corrections, skip_special_tokens=True)
print(corrections)
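
The steps above can also be wrapped in a small reusable helper. The sketch below is our own convenience wrapper, not part of the model card: the correct_grammar name and the max_length value are assumptions, and the model is loaded once so repeated calls stay cheap.

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Hypothetical helper, not part of the model card: load once, reuse for every call
TOKENIZER = AutoTokenizer.from_pretrained("zuu/grammar-error-correcter")
MODEL = AutoModelForSeq2SeqLM.from_pretrained("zuu/grammar-error-correcter")

def correct_grammar(texts):
    # Tokenize a batch of sentences, padding to the longest one
    tokens = TOKENIZER(texts, padding=True, return_tensors='pt')
    # max_length=128 is an assumed bound; tune it to your input lengths
    outputs = MODEL.generate(**tokens, max_length=128)
    # Decode token ids back into corrected strings
    return TOKENIZER.batch_decode(outputs, skip_special_tokens=True)

print(correct_grammar(['young children should avoid exposure to contageous disease']))

For a quick experiment, the generic text2text-generation pipeline should also work with this checkpoint: pipeline("text2text-generation", model="zuu/grammar-error-correcter").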