mriusero committed · fabb668
Parent(s): b3b929b

fix: respond shape

Files changed:
- app.py (+1 -1)
- requirements.txt (+2 -2)
- src/chat.py (+6 -9)
app.py CHANGED
@@ -43,7 +43,7 @@ with gr.Blocks(theme=custom_theme) as demo:
         """
     )
     gr.HTML("<div style='margin-bottom: 40px;'></div>")
-    chatbot = gr.ChatInterface(respond)
+    chatbot = gr.ChatInterface(respond, type='messages')
 
     # DASHBOARD
     with gr.Tab("Dashboard"):
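With type='messages', gr.ChatInterface passes the conversation history to the callback as a list of OpenAI-style {"role": ..., "content": ...} dicts rather than as (user, assistant) tuples, which appears to be what the "fix: respond shape" title refers to. A minimal, self-contained sketch of a callback wired up this way; the echo logic is illustrative only and is not part of this Space:

import gradio as gr

# Illustrative stand-in for the Space's real respond(); it just echoes the input.
def respond(message, history):
    # With type='messages', history is a list of dicts such as
    # [{"role": "user", "content": "hi"}, {"role": "assistant", "content": "hello"}]
    return f"echo ({len(history)} prior messages): {message}"

demo = gr.ChatInterface(respond, type='messages')

if __name__ == "__main__":
    demo.launch()

Pinning gradio==5.33.0 in requirements.txt (see the diff below) keeps this messages-format behaviour stable across Space rebuilds.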
requirements.txt CHANGED
@@ -1,4 +1,4 @@
-huggingface_hub
-gradio
+huggingface_hub
+gradio==5.33.0
 numpy
 pandas
src/chat.py CHANGED
@@ -2,14 +2,12 @@ from huggingface_hub import InferenceClient
 
 client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
-def respond(
-    message,
-    history,
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
+def respond(message, history):
+    system_message = "Your default system message"
+    max_tokens = 150
+    temperature = 0.7
+    top_p = 0.9
+
     messages = [{"role": "system", "content": system_message}]
 
     for val in history:
@@ -30,6 +28,5 @@ def respond(
         top_p=top_p,
     ):
         token = message.choices[0].delta.content
-
         response += token
         yield response
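For reference, a sketch of how the whole respond generator might read after this commit. The parts outside the hunks above (the history loop body and the streaming call) are not shown in the diff, so they are assumed here to follow the stock Hugging Face ChatInterface template, adapted to the messages-format history that app.py now requests; the default system message and sampling values are taken from the diff.

from huggingface_hub import InferenceClient

client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

def respond(message, history):
    # Defaults moved from the signature into the body (per the diff above).
    system_message = "Your default system message"
    max_tokens = 150
    temperature = 0.7
    top_p = 0.9

    messages = [{"role": "system", "content": system_message}]

    # Assumption: with type='messages' in app.py, history already arrives as
    # a list of {"role": ..., "content": ...} dicts, so each turn is copied over.
    for val in history:
        messages.append({"role": val["role"], "content": val["content"]})

    messages.append({"role": "user", "content": message})

    response = ""
    # Stream the completion and yield the partial reply after each token.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        response += token or ""  # guard against a None delta on the final chunk
        yield response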