SaviAnna committed on
Commit
f98d493
·
verified ·
1 Parent(s): 356b987

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -3
app.py CHANGED
@@ -42,15 +42,14 @@ def generate_text(model, tokenizer, prompt, max_len, temperature):
42
  # pad_token_id=tokenizer.eos_token_id)
43
  output = model.generate(
44
  input_ids,
45
- max_length=max_length,
46
  temperature=temperature, # Controls the diversity of the generated text
47
  top_k=50, # Keeps only the top-k most likely words
48
  top_p=0.9, # Nucleus sampling (cumulative probability)
49
  repetition_penalty=1.2, # Penalty for repeating words or phrases
50
  no_repeat_ngram_size=4, # Prevents repetition of n-grams (e.g., bigrams)
51
  do_sample=True, # Enables sampling for greater diversity
52
- pad_token_id=tokenizer.eos_token_id,
53
- max_length=max_len)[0]
54
 
55
 
56
  #generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
 
42
  # pad_token_id=tokenizer.eos_token_id)
43
  output = model.generate(
44
  input_ids,
45
+ max_length=max_len,
46
  temperature=temperature, # Controls the diversity of the generated text
47
  top_k=50, # Keeps only the top-k most likely words
48
  top_p=0.9, # Nucleus sampling (cumulative probability)
49
  repetition_penalty=1.2, # Penalty for repeating words or phrases
50
  no_repeat_ngram_size=4, # Prevents repetition of n-grams (e.g., bigrams)
51
  do_sample=True, # Enables sampling for greater diversity
52
+ pad_token_id=tokenizer.eos_token_id)[0]
 
53
 
54
 
55
  #generated_text = tokenizer.decode(output[0], skip_special_tokens=True)