max new token update
app.py CHANGED
@@ -49,7 +49,7 @@ async def ask(request: Request):
     print("Tokenizer process completed")

     print("Model process started")
-    outputs = model.generate(**input_ids, max_new_tokens=
+    outputs = model.generate(**input_ids, max_new_tokens=512)

     print("Tokenizer decode process started")
     answer = tokenizer.decode(outputs[0]).split("<end_of_turn>")[1].strip()
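For context, a minimal sketch of how this generation step typically fits together, assuming a Gemma-style instruction-tuned chat model; the checkpoint id, the question variable, and everything outside the generate and decode lines are illustrative assumptions, not the Space's actual app.py.

# Minimal sketch, assuming a Gemma-style chat model whose template uses
# <start_of_turn>/<end_of_turn> markers; model_id and question are hypothetical.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "google/gemma-2b-it"  # hypothetical checkpoint for illustration
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

question = "What is the capital of France?"  # would come from the request body
chat = [{"role": "user", "content": question}]
prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
input_ids = tokenizer(prompt, return_tensors="pt", add_special_tokens=False)
print("Tokenizer process completed")

print("Model process started")
# max_new_tokens caps how many tokens generate() may append to the prompt;
# this commit raises that cap to 512.
outputs = model.generate(**input_ids, max_new_tokens=512)

print("Tokenizer decode process started")
# Mirrors the app's post-processing: split the decoded sequence on the
# <end_of_turn> marker to drop the user turn and keep the model's reply.
answer = tokenizer.decode(outputs[0]).split("<end_of_turn>")[1].strip()
print(answer)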