Update app.py
app.py
CHANGED
@@ -4,8 +4,7 @@ from huggingface_hub import InferenceClient
 """
 For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 """
-
-client = InferenceClient("qwen2.5:0.5b")
+client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")


 def respond(
@@ -16,7 +15,6 @@ def respond(
     temperature,
     top_p,
 ):
-    # Prepare the messages for the API
     messages = [{"role": "system", "content": system_message}]

     for val in history:
@@ -27,46 +25,24 @@ def respond(

     messages.append({"role": "user", "content": message})

-    response = ""
+    response = ""

-
-
-
-
-
-
-
-
-            stream=True
-        )
+    for message in client.chat_completion(
+        messages,
+        max_tokens=max_tokens,
+        stream=True,
+        temperature=temperature,
+        top_p=top_p,
+    ):
+        token = message.choices[0].delta.content

-
-
-        if 'choices' not in message or len(message['choices']) == 0 or 'delta' not in message['choices'][0]:
-            raise ValueError("Unexpected response from the model.")
-
-        token = message['choices'][0]['delta']['content']
-        response += token  # Accumulate the content
-
-        # Return the response incrementally
-        yield response
-
-    except ValueError as e:
-        print(f"Value error: {e}")
-    except ConnectionError as e:
-        print(f"Connection error: {e}")
-    except TimeoutError as e:
-        print(f"Timeout error: {e}")
-    except Exception as e:
-        print(f"Unexpected error: {e}")
-
-    return response  # Return the final response once processing ends
+        response += token
+        yield response


 """
 For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
 """
-# Creating the Gradio interface
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
@@ -85,4 +61,4 @@ demo = gr.ChatInterface(


 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
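The commit replaces the "qwen2.5:0.5b" client and the custom try/except streaming code with the stock Gradio template: `respond` now iterates over `client.chat_completion(..., stream=True)` and yields a progressively longer `response` string, which `gr.ChatInterface` renders as live typing. If the error handling dropped here is still wanted, a minimal sketch along the following lines could wrap the new streaming loop. It is not part of this commit: the exception types simply mirror the removed code, and the `if token:` guard is an extra assumption to skip stream chunks that carry no text.

# Sketch only, not part of this commit: the new streaming loop wrapped in
# error handling similar to what this commit removed.
from huggingface_hub import InferenceClient

client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")


def respond(message, history, system_message, max_tokens, temperature, top_p):
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    response = ""
    try:
        # Stream partial completions and yield the accumulated text so far.
        for chunk in client.chat_completion(
            messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        ):
            token = chunk.choices[0].delta.content
            if token:  # assumption: ignore chunks without text content
                response += token
                yield response
    except (ConnectionError, TimeoutError) as e:
        print(f"Connection error: {e}")
    except Exception as e:
        print(f"Unexpected error: {e}")

Wired into `gr.ChatInterface` exactly as in the file above, the only user-visible difference is that API failures are logged instead of raising inside the Space.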