WilliamGazeley committed
Commit 8741596 · 1 Parent(s): 6e203a2

Increase max token output

Files changed (1)
  1. app.py +1 -1
app.py CHANGED
@@ -23,7 +23,7 @@ def init_llm():
 def get_response(prompt):
     try:
         prompts = [template.format(user_message=prompt)]
-        sampling_params = SamplingParams(temperature=0.3, top_p=0.95)
+        sampling_params = SamplingParams(temperature=0.3, top_p=0.95, max_tokens=500, stop_token_ids=[128009])
         outputs = llm.generate(prompts, sampling_params)
         for output in outputs:
             return output.outputs[0].text
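
For context, a minimal runnable sketch of how the changed hunk sits in app.py once applied. Only the SamplingParams call reflects the commit; the model name, prompt template, and except handler below are illustrative assumptions, since the diff does not show them. Token id 128009 is the <|eot_id|> end-of-turn token in the Llama 3 tokenizer, which is presumably why it is added as a stop token alongside the max_tokens cap.

from vllm import LLM, SamplingParams

# Illustrative stand-ins: the actual model name and prompt template
# live elsewhere in app.py and are not part of this diff.
MODEL = "meta-llama/Meta-Llama-3-8B-Instruct"
template = (
    "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n"
    "{user_message}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
)

llm = LLM(model=MODEL)

def get_response(prompt):
    try:
        prompts = [template.format(user_message=prompt)]
        # max_tokens=500 caps the completion length; stop_token_ids=[128009]
        # also ends generation at the Llama 3 <|eot_id|> end-of-turn token.
        sampling_params = SamplingParams(
            temperature=0.3,
            top_p=0.95,
            max_tokens=500,
            stop_token_ids=[128009],
        )
        outputs = llm.generate(prompts, sampling_params)
        for output in outputs:
            return output.outputs[0].text
    except Exception as e:
        # Assumed handler; the except branch is outside the diff hunk.
        return f"Error: {e}"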