Update app.py
app.py CHANGED
@@ -4,7 +4,7 @@ from huggingface_hub import InferenceClient
 """
 For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 """
-client = InferenceClient(
+client = InferenceClient()
 
 def respond(message, history, system_message, max_tokens, temperature, top_p):
     messages = [{"role": "system", "content": system_message}]
@@ -18,6 +18,7 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
     response = ""
     try:
         for message in client.chat_completion(
+            model="mistralai/Codestral-22B-v0.1",
             messages=messages,
             max_tokens=max_tokens,
             stream=True,