Update app.py
app.py CHANGED
@@ -62,31 +62,21 @@ llama = Llama(
 # yield partial_message


+# Function to generate responses
 def generate_response(message, history, system_prompt, temperature, max_new_tokens, top_k, repetition_penalty, top_p):
-
-
-
-    chat_prompt = system_prompt or "You are an Urdu Chatbot. Write an appropriate response for the given instruction."
-    chat_prompt += "\n"
-
+    chat_prompt = system_prompt or (
+        "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n"
+    )
     # Add history to the prompt
     for user, bot in history:
-        chat_prompt += f"\n### Instruction
-
+        chat_prompt += f"\n### Instruction:{user}\n\n### Response:{bot}\n"
+
     # Add current message
-    chat_prompt += f"\n### Instruction
+    chat_prompt += f"\n### Instruction:{message}\n\n### Response:"
+
+    print(chat_prompt)

-    response = llama(
-        chat_prompt,
-        temperature=temperature,
-        max_tokens=max_new_tokens,
-        top_k=top_k,
-        repeat_penalty=repetition_penalty,
-        top_p=top_p,
-        stop=["###", "### Instruction:", "\n### Instruction:", "Q:"],
-        echo=False,
-        stream=True
-    )
+    response = llama(chat_prompt, temperature=temperature, max_tokens=max_new_tokens, top_k=top_k, repeat_penalty=repetition_penalty, top_p=top_p, stop=["Q:", "\n"], echo=False, stream=True)

     text = ""
     for chunk in response:
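For context, the loop that follows this hunk presumably accumulates the streamed chunks and yields partial text back to the chat UI. A minimal sketch of that consumption pattern, assuming llama-cpp-python's streaming chunk layout (the generated token text at chunk["choices"][0]["text"]) and a Gradio generator-style callback; this sketch is not part of the commit:

    text = ""
    for chunk in response:
        # Each streamed chunk carries the newly generated token text.
        text += chunk["choices"][0]["text"]
        # Yielding the accumulated string lets a Gradio ChatInterface render the reply incrementally.
        yield text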