Spaces:
Sleeping
Sleeping
xavierbarbier
committed on
Commit
•
8bd2156
1
Parent(s):
aaf7aaf
Update app.py
Browse files
app.py
CHANGED
@@ -25,7 +25,7 @@ model = model = GPT4All(model_name, model_path, allow_download = False, device="
|
|
25 |
print("Finish the model init process")
|
26 |
|
27 |
model.config["promptTemplate"] = "[INST] {0} [/INST]"
|
28 |
-
model.config["systemPrompt"] = ""
|
29 |
model._is_chat_session_activated = False
|
30 |
|
31 |
max_new_tokens = 2048
|
@@ -34,7 +34,7 @@ def generater(message, history, temperature, top_p, top_k):
|
|
34 |
prompt = "<s>"
|
35 |
for user_message, assistant_message in history:
|
36 |
prompt += model.config["promptTemplate"].format(user_message)
|
37 |
-
prompt += assistant_message + "
|
38 |
prompt += model.config["promptTemplate"].format(message)
|
39 |
outputs = []
|
40 |
for token in model.generate(prompt=prompt, temp=temperature, top_k = top_k, top_p = top_p, max_tokens = max_new_tokens, streaming=True):
|
|
|
25 |
print("Finish the model init process")
|
26 |
|
27 |
model.config["promptTemplate"] = "[INST] {0} [/INST]"
|
28 |
+
model.config["systemPrompt"] = "Tu es un assitant et tu dois répondre en français"
|
29 |
model._is_chat_session_activated = False
|
30 |
|
31 |
max_new_tokens = 2048
|
|
|
34 |
prompt = "<s>"
|
35 |
for user_message, assistant_message in history:
|
36 |
prompt += model.config["promptTemplate"].format(user_message)
|
37 |
+
prompt += assistant_message + "</s>"
|
38 |
prompt += model.config["promptTemplate"].format(message)
|
39 |
outputs = []
|
40 |
for token in model.generate(prompt=prompt, temp=temperature, top_k = top_k, top_p = top_p, max_tokens = max_new_tokens, streaming=True):
|