Wootang01 committed on
Commit 5904f8c · 1 Parent(s): 075f6ab

Update app.py

Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -5,7 +5,7 @@ st.write("Paste or type text, submit and the machine will attempt to correct you
 
 default_text = "This should working"
 sent = st.text_area("Text", default_text, height=40)
-num_correct_options = st.number_input('Number of Correction Options', min_value=1, max_value=2, value=1, step=1)
+num_correct_options = st.number_input('Number of Correction Options', min_value=1, max_value=3, value=1, step=1)
 
 from transformers import T5ForConditionalGeneration, T5Tokenizer
 import torch
@@ -15,7 +15,7 @@ model = T5ForConditionalGeneration.from_pretrained('deep-learning-analytics/Gram
 
 def correct_grammar(input_text, num_return_sequences=num_correct_options):
   batch = tokenizer([input_text], truncation=True, padding = 'max_length', max_length = 64, return_tensors = 'pt').to(torch_device)
-  results = model.generate(**batch, max_length = 64, num_beams = 3, num_return_sequences = num_correct_options, temperature = 1.5)
+  results = model.generate(**batch, max_length = 64, num_beams = 4, num_return_sequences = num_correct_options, temperature = 1.5)
 
   return results
 
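
For context, a minimal, self-contained sketch of the updated generation path, assuming the checkpoint is deep-learning-analytics/GrammarCorrector (the hunk header truncates the name) and that app.py decodes the generated ids elsewhere; the decoding step and the print-based usage below are illustrative assumptions, not code from this commit.

import torch
from transformers import T5ForConditionalGeneration, T5Tokenizer

# Assumed setup mirroring the lines visible in the hunk headers.
torch_device = 'cuda' if torch.cuda.is_available() else 'cpu'
model_name = 'deep-learning-analytics/GrammarCorrector'  # assumed full model id
tokenizer = T5Tokenizer.from_pretrained(model_name)
model = T5ForConditionalGeneration.from_pretrained(model_name).to(torch_device)

def correct_grammar(input_text, num_return_sequences=1):
    # Tokenize the input and move the tensors to the model's device.
    batch = tokenizer([input_text], truncation=True, padding='max_length',
                      max_length=64, return_tensors='pt').to(torch_device)
    # Beam search requires num_return_sequences <= num_beams, so num_beams=4
    # leaves headroom for the UI's new maximum of three correction options.
    results = model.generate(**batch, max_length=64, num_beams=4,
                             num_return_sequences=num_return_sequences)
    # Decode the generated token ids back into readable text (assumed display step).
    return tokenizer.batch_decode(results, skip_special_tokens=True)

print(correct_grammar("This should working", num_return_sequences=3))

The temperature=1.5 argument from the diff is omitted in this sketch because it only affects sampling-based decoding and is ignored by plain beam search.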