Update app.py
app.py CHANGED
@@ -4,22 +4,29 @@ from huggingface_hub import InferenceClient
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

def respond(message, history):
+    response = ""
+
    messages = [{"role": "system", "content": "You are a friendly chatbot."}]
+
    if history:
        messages.extend(history)
+
    messages.append({"role": "user", "content": message})
+
+    stream = client.chat_completion(
        messages,
-        max_tokens=100
+        max_tokens=100,
+        temperature=1.2,
+        stream=True
    )
+
+    for message in stream:
+        token = message.choices[0].delta.content
+
+        if token is not None:
+            response += token
+            yield response

chatbot = gr.ChatInterface(respond, type="messages")

|
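The switch to stream=True is what enables the token-by-token effect: client.chat_completion now returns an iterator of chunks whose choices[0].delta.content carries the newly generated text (or None for bookkeeping chunks), and yielding the accumulated response lets gr.ChatInterface redraw the reply as it grows. The same call can be exercised outside Gradio; this is a minimal sketch assuming the hosted HuggingFaceH4/zephyr-7b-beta endpoint is reachable:

from huggingface_hub import InferenceClient

client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

# Stream a short completion and print tokens as they arrive.
for chunk in client.chat_completion(
    [{"role": "user", "content": "Say hello in one sentence."}],
    max_tokens=50,
    stream=True,
):
    token = chunk.choices[0].delta.content
    if token is not None:
        print(token, end="", flush=True)
print()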