Wootang01 committed
Commit
d82c758
1 Parent(s): 019744c

Update app.py

Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -13,9 +13,9 @@ torch_device = 'cuda' if torch.cuda.is_available() else 'cpu'
 tokenizer = T5Tokenizer.from_pretrained('deep-learning-analytics/GrammarCorrector')
 model = T5ForConditionalGeneration.from_pretrained('deep-learning-analytics/GrammarCorrector').to(torch_device)
 
-def correct_grammar(input_text, num_correct_options=num_correct_options):
+def correct_grammar(input_text, num_return_sequences=num_correct_options):
   batch = tokenizer([input_text], truncation=True, padding = 'max_length', max_length = 64, return_tensors = 'pt').to(torch_device)
-  results = model.generate(**batch, max_length = 64, num_beams = 2, num_correct_options = num_correct_options, temperature = 1.5)
+  results = model.generate(**batch, max_length = 64, num_beams = 2, num_return_sequences = num_correct_options, temperature = 1.5)
 
   return results
 
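For context, a minimal runnable sketch of how the corrected function might sit in app.py after this commit. The imports, the example value of num_correct_options, and the final batch_decode call are assumptions added for illustration; the committed code itself returns the raw generate() output.

import torch
from transformers import T5Tokenizer, T5ForConditionalGeneration

torch_device = 'cuda' if torch.cuda.is_available() else 'cpu'
num_correct_options = 2  # assumed default; defined elsewhere in app.py

tokenizer = T5Tokenizer.from_pretrained('deep-learning-analytics/GrammarCorrector')
model = T5ForConditionalGeneration.from_pretrained('deep-learning-analytics/GrammarCorrector').to(torch_device)

def correct_grammar(input_text, num_return_sequences=num_correct_options):
    # Tokenize the input sentence and move the tensors to the active device.
    batch = tokenizer([input_text], truncation=True, padding='max_length',
                      max_length=64, return_tensors='pt').to(torch_device)
    # num_return_sequences is a valid generate() keyword; the old
    # num_correct_options keyword was not accepted by transformers.
    results = model.generate(**batch, max_length=64, num_beams=2,
                             num_return_sequences=num_correct_options,
                             temperature=1.5)
    # Assumed addition: decode the generated token IDs into readable strings.
    return tokenizer.batch_decode(results, skip_special_tokens=True)

With these assumptions, correct_grammar("He are going to school.") would return two decoded candidate corrections, since num_return_sequences matches num_beams.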