Audio init
- app.py  +34 -59
- requirements.txt  +1 -0
app.py
CHANGED
@@ -1,6 +1,10 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
 from transformers import pipeline
+from scipy.io.wavfile import write as write_wav
+
+AUDIO_FILE_PATH = "bark_generation.wav"
+synthesizer = pipeline("text-to-speech", "suno/bark-small")
 
 """
 For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
@@ -8,65 +12,36 @@
 
 client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
 
-… (old lines 11-41 defined the `respond()` callback; their content was not captured in this view, apart from two closing `):` lines)
-        token = message.choices[0].delta.content
-        response += token
-        yield response
-
-    audio = tts(response)
-    return response, audio
-
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-    outputs=[gr.Textbox(), gr.Audio()],
-)
-
+with gr.Blocks() as demo:
+    chatbot = gr.Chatbot(type="messages")
+    audio_box = gr.Audio(autoplay=True)
+    msg = gr.Textbox(submit_btn=True)
+    clear = gr.Button("Clear")
+
+    def synthesize_audio(text):
+        speech = synthesizer(text, forward_params={"do_sample": True})
+        write_wav(AUDIO_FILE_PATH, rate=speech["sampling_rate"], data=speech["audio"])
+
+    def user(user_message, history: list):
+        return "", history + [{"role": "user", "content": user_message}]
+
+    def bot(history: list):
+        history.append({"role": "assistant", "content": ""})
+        for message in client.chat_completion(
+            history,
+            stream=True,
+        ):
+            token = message.choices[0].delta.content
+            history[-1]["content"] += token
+            yield history, None
+
+        synthesize_audio(history[-1]["content"])
+        return history, AUDIO_FILE_PATH
+
+    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
+        bot, chatbot, [chatbot, audio_box]
+    )
+    clear.click(lambda: None, None, chatbot, queue=False)
 
 if __name__ == "__main__":
     demo.launch()
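The heart of this commit is the new `synthesize_audio()` helper: the `suno/bark-small` text-to-speech pipeline returns a dict holding a NumPy `audio` array plus its `sampling_rate`, which `scipy.io.wavfile.write` serializes to `bark_generation.wav`. A minimal standalone sketch of that path, assuming `transformers` and `scipy` are installed and the `suno/bark-small` checkpoint can be downloaded (the sample sentence is made up):

    # Synthesize one string with Bark and write it to disk,
    # mirroring what synthesize_audio() does in app.py.
    from transformers import pipeline
    from scipy.io.wavfile import write as write_wav

    synthesizer = pipeline("text-to-speech", "suno/bark-small")
    # The pipeline returns {"audio": ndarray, "sampling_rate": int}.
    speech = synthesizer("Hello there!", forward_params={"do_sample": True})
    write_wav("bark_generation.wav", rate=speech["sampling_rate"], data=speech["audio"])

One caveat worth flagging in review: `bot()` is a generator, and Gradio delivers generator output through `yield`, so the trailing `return history, AUDIO_FILE_PATH` is likely never surfaced; ending with a final `yield history, AUDIO_FILE_PATH` after `synthesize_audio()` is the usual pattern. Likewise, `delta.content` can be `None` on the last streamed chunk, so guarding with `token or ""` is a common defensive tweak.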
requirements.txt
CHANGED
@@ -1,4 +1,5 @@
 huggingface_hub
 unsloth
 gradio
+scipy
 transformers