Update app.py
app.py
CHANGED
@@ -82,11 +82,8 @@ def generater(message, history, temperature, top_p, top_k):
     prompt = "<s>"
     for user_message, assistant_message in history:
         prompt += model.config["promptTemplate"].format(user_message)
-
         prompt += assistant_message + "</s>"
-
     prompt += model.config["promptTemplate"].format(message)
-
     outputs = []
     for token in model.generate(prompt=prompt, temp=temperature, top_k = top_k, top_p = top_p, max_tokens = max_new_tokens, streaming=True):
         outputs.append(token)
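For context, the commit only removes blank lines inside generater, which builds one flat prompt from the chat history and streams tokens from the model. Below is a minimal sketch of how the full function could look around this hunk; it assumes the Space uses the gpt4all Python package (whose GPT4All.generate accepts temp, top_k, top_p, max_tokens, and streaming), and the model filename, the max_new_tokens value, and the trailing yield are assumptions that are not visible in this diff.

# Sketch only: reconstructs the function surrounding this hunk.
# Model setup, max_new_tokens, and the final `yield` are assumptions,
# not part of the commit.
from gpt4all import GPT4All

model = GPT4All("mistral-7b-instruct-v0.1.Q4_0.gguf")  # hypothetical model file
max_new_tokens = 512  # assumed default; not shown in the diff


def generater(message, history, temperature, top_p, top_k):
    # Rebuild the conversation as one prompt using the model's chat template.
    prompt = "<s>"
    for user_message, assistant_message in history:
        prompt += model.config["promptTemplate"].format(user_message)
        prompt += assistant_message + "</s>"
    prompt += model.config["promptTemplate"].format(message)

    # Stream tokens and hand back the accumulated text as it grows.
    outputs = []
    for token in model.generate(prompt=prompt, temp=temperature,
                                top_k=top_k, top_p=top_p,
                                max_tokens=max_new_tokens, streaming=True):
        outputs.append(token)
        yield "".join(outputs)  # assumption: consumed by a Gradio chat UI

Used this way, each partial string yielded from the generator can be rendered incrementally by a streaming chat interface, which is the usual pattern for a function with this signature.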