bambadij committed
Commit a19aaf0 · 1 Parent(s): d74f4a3
Files changed (1)
  1. app.py +10 -1
app.py CHANGED
@@ -65,7 +65,16 @@ async def predict(request: PredictionRequest):
 
     # Generate text from the prompt
     inputs = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)
-    outputs = model.generate(inputs, max_length=3000, do_sample=True)
+    attention_mask = inputs.attention_mask.to(model.device)
+    outputs = model.generate(
+        inputs,
+        attention_mask=attention_mask,
+        max_length=3000,
+        do_sample=True
+    )
+    # generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    # outputs = model.generate(inputs, max_length=3000, do_sample=True)
+
     generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
 
     return {"generated_text": generated_text}
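For reference, a minimal sketch of what the new generate call is meant to do, assuming the same globally loaded tokenizer and model from app.py (the helper name generate_text is illustrative, not part of the commit). One caveat: inputs is assigned from .input_ids, so it is a plain tensor with no attention_mask attribute; the sketch keeps the full tokenizer output and pulls both fields from it.

    import torch

    def generate_text(prompt: str) -> str:
        # Tokenize once and keep the full output (input_ids + attention_mask),
        # then move everything to the model's device.
        encoded = tokenizer(prompt, return_tensors="pt").to(model.device)

        with torch.no_grad():
            outputs = model.generate(
                encoded.input_ids,
                attention_mask=encoded.attention_mask,
                max_length=3000,
                do_sample=True,
            )

        # Decode only the first (and only) sequence in the batch.
        return tokenizer.decode(outputs[0], skip_special_tokens=True)

Passing the attention_mask explicitly also avoids the transformers warning about the mask not being inferable when the pad and eos tokens are the same, which appears to be the point of this change.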