ajeetkumar01 committed
Commit c458574 · verified · 1 Parent(s): 27be2eb

Update app.py

Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -26,7 +26,7 @@ def generate_text(input_text, max_length=16, num_beams=5, do_sample=False, no_re
     - generated_text (str): The generated text.
     """
     # Encode the input text and move it to the appropriate device
-    input_ids = tokenizer(input_text, return_tensors='pt')['input_ids'].to(device)
+    input_ids = tokenizer(input_text, return_tensors='pt')['input_ids']
     # Generate text using the model
     output = model.generate(input_ids, max_length=max_length, num_beams=num_beams,
                             do_sample=do_sample, no_repeat_ngram_size=no_repeat_ngram_size)
@@ -49,7 +49,7 @@ def generate_text_with_nucleus_search(input_text, max_length=16, do_sample=True,
     - generated_text (str): The generated text.
     """
     # Encode the input text and move it to the appropriate device
-    input_ids = tokenizer(input_text, return_tensors='pt')['input_ids'].to(device)
+    input_ids = tokenizer(input_text, return_tensors='pt')['input_ids']
     # Generate text using nucleus sampling
     output = model.generate(input_ids, max_length=max_length, do_sample=do_sample, top_p=top_p)
     # Decode the generated output
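For orientation, below is a minimal, self-contained sketch of how the two patched functions plausibly fit together after this commit. It is an assumption-laden reconstruction, not the actual app.py: the checkpoint name ('gpt2'), the AutoTokenizer/AutoModelForCausalLM loading code, the decode/return step, and the defaults for top_p and no_repeat_ngram_size (both truncated in the hunk headers above) are guesses, and any UI wiring is omitted.

from transformers import AutoTokenizer, AutoModelForCausalLM

# Assumed checkpoint; the model actually loaded in app.py is not visible in this diff.
tokenizer = AutoTokenizer.from_pretrained('gpt2')
model = AutoModelForCausalLM.from_pretrained('gpt2')  # stays on CPU, matching the un-moved input_ids

def generate_text(input_text, max_length=16, num_beams=5, do_sample=False, no_repeat_ngram_size=2):
    # As of this commit the encoded ids are no longer moved to a separate device,
    # so the model and the inputs must both live on the same (here: CPU) device.
    input_ids = tokenizer(input_text, return_tensors='pt')['input_ids']
    # Beam-search generation with n-gram repetition blocking
    output = model.generate(input_ids, max_length=max_length, num_beams=num_beams,
                            do_sample=do_sample, no_repeat_ngram_size=no_repeat_ngram_size)
    return tokenizer.decode(output[0], skip_special_tokens=True)

def generate_text_with_nucleus_search(input_text, max_length=16, do_sample=True, top_p=0.9):
    # top_p default of 0.9 is an assumption; the original signature is cut off in the diff header.
    input_ids = tokenizer(input_text, return_tensors='pt')['input_ids']
    # Nucleus (top-p) sampling generation
    output = model.generate(input_ids, max_length=max_length, do_sample=do_sample, top_p=top_p)
    return tokenizer.decode(output[0], skip_special_tokens=True)

print(generate_text("The quick brown fox"))
print(generate_text_with_nucleus_search("The quick brown fox"))

If the model were ever moved to a GPU, the encoded ids would again need the .to(device) call that this commit removes; keeping both on the CPU is what makes the simplified lines work.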