Update app.py
app.py CHANGED
@@ -7,8 +7,8 @@ import spaces
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 
-MAX_MAX_NEW_TOKENS =
-DEFAULT_MAX_NEW_TOKENS =
+MAX_MAX_NEW_TOKENS = 200
+DEFAULT_MAX_NEW_TOKENS = 120
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
 DESCRIPTION = """\
@@ -33,7 +33,7 @@ def generate(
     message: str,
     chat_history: list[tuple[str, str]],
     system_prompt: str,
-    max_new_tokens: int =
+    max_new_tokens: int = 120,
     temperature: float = 0.6,
     top_p: float = 0.9,
     top_k: int = 50,
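The commit tightens the generation budget: the hard ceiling MAX_MAX_NEW_TOKENS becomes 200 and the default request (DEFAULT_MAX_NEW_TOKENS and the max_new_tokens argument of generate()) becomes 120. The rest of app.py is not shown in the diff, but Spaces built on this transformers + TextIteratorStreamer pattern typically consume these constants as in the minimal sketch below. The model id, threading layout, and prompt-trimming details here are assumptions for illustration, not code confirmed by this Space.

import os
from threading import Thread

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

MAX_MAX_NEW_TOKENS = 200
DEFAULT_MAX_NEW_TOKENS = 120
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))

# Hypothetical checkpoint for illustration only; the diff does not show which model the Space loads.
model_id = "meta-llama/Llama-2-7b-chat-hf"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")


def generate(
    message: str,
    chat_history: list[tuple[str, str]],
    system_prompt: str,
    max_new_tokens: int = 120,  # new default from this commit (DEFAULT_MAX_NEW_TOKENS)
    temperature: float = 0.6,
    top_p: float = 0.9,
    top_k: int = 50,
):
    # Rebuild the conversation in chat-template form.
    conversation = []
    if system_prompt:
        conversation.append({"role": "system", "content": system_prompt})
    for user, assistant in chat_history:
        conversation += [
            {"role": "user", "content": user},
            {"role": "assistant", "content": assistant},
        ]
    conversation.append({"role": "user", "content": message})

    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
    # Keep only the most recent MAX_INPUT_TOKEN_LENGTH tokens of the prompt.
    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
    input_ids = input_ids.to(model.device)

    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        # Clamp the requested budget to the new 200-token ceiling.
        max_new_tokens=min(max_new_tokens, MAX_MAX_NEW_TOKENS),
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
    )
    # Run generation in a background thread and stream partial text back to the chat UI.
    Thread(target=model.generate, kwargs=generate_kwargs).start()

    outputs = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)

With these values, a single reply is capped at 200 new tokens even if the UI slider asks for more, and the slider's starting position is 120.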