WilliamGazeley committed
Commit: 8741596
1 Parent(s): 6e203a2

Increase max token output
app.py (CHANGED)
@@ -23,7 +23,7 @@ def init_llm():
 def get_response(prompt):
     try:
         prompts = [template.format(user_message=prompt)]
-        sampling_params = SamplingParams(temperature=0.3, top_p=0.95)
+        sampling_params = SamplingParams(temperature=0.3, top_p=0.95, max_tokens=500, stop_token_ids=[128009])
         outputs = llm.generate(prompts, sampling_params)
         for output in outputs:
             return output.outputs[0].text
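
For context, here is a minimal runnable sketch of how the updated get_response could sit inside a vLLM app. The template string, model name, and except branch are hypothetical placeholders (the diff does not show them); only the SamplingParams arguments come from this commit. max_tokens=500 caps the completion length, which is what the commit message refers to, and 128009 is Llama 3's <|eot_id|> token id, supplied as an explicit stop token so generation ends cleanly at the end of the assistant turn.

from vllm import LLM, SamplingParams

# Hypothetical Llama 3 chat template; the real one is defined elsewhere in app.py.
template = (
    "<|start_header_id|>user<|end_header_id|>\n\n"
    "{user_message}<|eot_id|>"
    "<|start_header_id|>assistant<|end_header_id|>\n\n"
)

# Placeholder model; the Space's actual model is configured in init_llm().
llm = LLM(model="meta-llama/Meta-Llama-3-8B-Instruct")

def get_response(prompt):
    try:
        prompts = [template.format(user_message=prompt)]
        # max_tokens=500 raises the output cap (the point of this commit);
        # stop_token_ids=[128009] stops at Llama 3's <|eot_id|> token.
        sampling_params = SamplingParams(
            temperature=0.3,
            top_p=0.95,
            max_tokens=500,
            stop_token_ids=[128009],
        )
        outputs = llm.generate(prompts, sampling_params)
        for output in outputs:
            return output.outputs[0].text
    except Exception as e:
        # Hypothetical fallback; the original except body is not shown in the diff.
        return f"Error: {e}"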