Update app.py
app.py
CHANGED
@@ -3,19 +3,18 @@ from llama_cpp import Llama
 
 llm = Llama(model_path="model.gguf", n_ctx=3072, n_threads=4, chat_format="chatml")
 
-
-
-
-
-    formatted_prompt.append({"role": "user", "content": user_prompt})
-    formatted_prompt.append({"role": "assistant", "content": bot_response })
+formatted_prompt = [{"role": "system", "content": system_prompt}]
+
+def generate(message, history, temperature=0.75, max_tokens=1536):
+    nonlocal formatted_prompt
     formatted_prompt.append({"role": "user", "content": message})
     stream_response = llm.create_chat_completion(messages=formatted_prompt, temperature=temperature, max_tokens=max_tokens, stream=True)
-    response
+    response = ""
     for chunk in stream_response:
         if len(chunk['choices'][0]["delta"]) != 0 and "content" in chunk['choices'][0]["delta"]:
-            response
-            yield response
+            response += chunk['choices'][0]["delta"]["content"]
+            yield response
+    formatted_prompt.append({"role": "assistant", "content": response})
 
 mychatbot = gr.Chatbot(
     avatar_images=["user.png", "bots.png"], bubble_full_width=False, show_label=False, show_copy_button=True, likeable=True,)
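For context, here is a minimal runnable sketch of what the full app.py plausibly looks like after this commit. The hunk only covers lines 3-20, so everything outside it is an assumption: the gradio import, the system_prompt value, and the gr.ChatInterface wiring at the bottom. One committed line also needs a caveat: nonlocal only rebinds a name inside a nested function, and at module scope it raises a SyntaxError, so the sketch uses global instead.

# Hypothetical reconstruction of app.py after this commit. Only the body
# of the diff hunk comes from the commit itself; the imports, system_prompt,
# and the ChatInterface wiring below are assumptions.
import gradio as gr
from llama_cpp import Llama

llm = Llama(model_path="model.gguf", n_ctx=3072, n_threads=4, chat_format="chatml")

system_prompt = "You are a helpful assistant."  # assumed; not shown in the hunk

# The commit moves the message list to module scope so it persists across
# turns instead of being rebuilt from Gradio's history argument.
formatted_prompt = [{"role": "system", "content": system_prompt}]

def generate(message, history, temperature=0.75, max_tokens=1536):
    # The commit writes `nonlocal formatted_prompt`, which is a SyntaxError
    # for a module-level function; `global` is the module-scope equivalent
    # (strictly, no declaration is needed just to call .append).
    global formatted_prompt
    formatted_prompt.append({"role": "user", "content": message})
    stream_response = llm.create_chat_completion(
        messages=formatted_prompt,
        temperature=temperature,
        max_tokens=max_tokens,
        stream=True,
    )
    response = ""
    for chunk in stream_response:
        delta = chunk["choices"][0]["delta"]
        if len(delta) != 0 and "content" in delta:
            response += delta["content"]
            yield response  # stream the growing reply to the UI
    # Keep the finished reply so the next turn sees the whole conversation.
    formatted_prompt.append({"role": "assistant", "content": response})

mychatbot = gr.Chatbot(
    avatar_images=["user.png", "bots.png"],
    bubble_full_width=False,
    show_label=False,
    show_copy_button=True,
    likeable=True,
)

# Assumed wiring: a streaming chat UI over generate().
demo = gr.ChatInterface(fn=generate, chatbot=mychatbot)
demo.queue().launch()

One design consequence worth noting: a module-level formatted_prompt is shared by every visitor to the Space and grows without bound, whereas rebuilding it from the per-session history argument (the pattern this commit moves away from) keeps sessions isolated.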