xavierbarbier committed: Update app.py
app.py
CHANGED
@@ -16,10 +16,10 @@ Mistral does not support system prompt symbol (such as ```<<SYS>>```) now, input
 """
 
 model_path = "models"
-
-
-
-
+model_name = "mistral-7b-instruct-v0.1.Q4_K_M.gguf"
+
+hf_hub_download(repo_id="TheBloke/Mistral-7B-Instruct-v0.1-GGUF", filename=model_name, local_dir=model_path, local_dir_use_symlinks=False)
+
 print("Start the model init process")
 model = model = GPT4All(model_name, model_path, allow_download = False, device="cpu")
 print("Finish the model init process")
@@ -34,7 +34,7 @@ def generater(message, history, temperature, top_p, top_k):
     prompt = "<s>"
     for user_message, assistant_message in history:
         prompt += model.config["promptTemplate"].format(user_message)
-        prompt += assistant_message + "</s>"
+        prompt += assistant_message + "Consigne : tu dois répondre en français</s>"
     prompt += model.config["promptTemplate"].format(message)
     outputs = []
     for token in model.generate(prompt=prompt, temp=temperature, top_k = top_k, top_p = top_p, max_tokens = max_new_tokens, streaming=True):
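For context, a minimal sketch of what the model-setup lines added in the first hunk do once assembled. This is not the Space's full app.py: the `huggingface_hub` and `gpt4all` imports are assumed, since the diff does not show them.

```python
from huggingface_hub import hf_hub_download
from gpt4all import GPT4All

model_path = "models"
model_name = "mistral-7b-instruct-v0.1.Q4_K_M.gguf"

# Fetch the quantized GGUF file into ./models; symlinks are disabled so the
# actual file lands in the local directory.
hf_hub_download(
    repo_id="TheBloke/Mistral-7B-Instruct-v0.1-GGUF",
    filename=model_name,
    local_dir=model_path,
    local_dir_use_symlinks=False,
)

print("Start the model init process")
# allow_download=False forces GPT4All to use the file downloaded above;
# device="cpu" keeps inference on the CPU, as in the original code.
model = GPT4All(model_name, model_path, allow_download=False, device="cpu")
print("Finish the model init process")
```

The unchanged line in the commit reads `model = model = GPT4All(...)`; the doubled assignment is harmless but redundant, so the sketch assigns once.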
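The second hunk appends a French instruction, "Consigne : tu dois répondre en français" ("Instruction: you must answer in French"), to every past assistant turn, since Mistral-instruct has no system-prompt slot. A hedged sketch of how the modified `generater` fits together follows; the loop body after `model.generate(...)` and the `max_new_tokens` value are not shown in the diff and are assumptions here.

```python
max_new_tokens = 512  # placeholder; the real value is defined elsewhere in app.py


def generater(message, history, temperature, top_p, top_k):
    # Build a Mistral-instruct prompt from the chat history. There is no
    # <<SYS>> system slot, so the French instruction rides along with each
    # previous assistant message instead.
    prompt = "<s>"
    for user_message, assistant_message in history:
        prompt += model.config["promptTemplate"].format(user_message)
        prompt += assistant_message + "Consigne : tu dois répondre en français</s>"
    prompt += model.config["promptTemplate"].format(message)

    outputs = []
    # Stream tokens back as they are produced (assumed Gradio-style generator).
    for token in model.generate(prompt=prompt, temp=temperature, top_k=top_k,
                                top_p=top_p, max_tokens=max_new_tokens,
                                streaming=True):
        outputs.append(token)
        yield "".join(outputs)
```

Note that the instruction is attached only to past assistant turns inside the loop, so the very first message of a fresh conversation reaches the model without it.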