Update app.py
app.py CHANGED
@@ -1,9 +1,11 @@
+# Install required libraries
+!pip install openai-whisper gradio gtts groq
+
 import os
 import whisper
 from groq import Groq
 from gtts import gTTS
 import tempfile
-import IPython.display as ipd
 import gradio as gr
 
 # Step 1: Set up Whisper for transcription
@@ -14,8 +16,10 @@ def transcribe_audio(audio_file):
     result = model.transcribe(audio_file)
     return result["text"]
 
-
-
+# Step 2: Set up Groq API for interacting with the LLM (e.g., Llama 3)
+api_key = "your_groq_api_key"  # Replace with your actual API key
+client = Groq(api_key=api_key)
+
 # Function to get a response from the Groq LLM (Llama 3)
 def get_groq_response(text):
     chat_completion = client.chat.completions.create(
@@ -33,10 +37,8 @@ def text_to_speech(text):
     temp_file = tempfile.NamedTemporaryFile(delete=False)
     tts.save(temp_file.name)
 
-    #
-
-
-    return temp_file.name  # Return file path for further use
+    # Return the file path for Gradio to play the audio
+    return temp_file.name
 
 # Step 4: Integrate everything into a Gradio interface
 def voice_chatbot(audio_input):
@@ -59,4 +61,4 @@ iface = gr.Interface(fn=voice_chatbot,
                      live=True)
 
 # Launch the Gradio interface
-iface.launch()
+iface.launch()
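
The new !pip install line at the top of app.py is notebook shell syntax; when the file runs as a plain Python script on a Space, the same dependencies are normally declared in a requirements.txt. A minimal sketch of such a file, covering only the packages named in that line (this file is not part of the commit):

# requirements.txt (hypothetical; not part of this commit)
openai-whisper
gradio
gtts
groq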
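
The diff shows only the last two lines of transcribe_audio (Step 1), so the model loading falls outside the hunk. A self-contained sketch of how that function typically looks with openai-whisper; the "base" checkpoint is an assumption, since the commit does not show which model the app actually loads:

# Hypothetical Step 1 sketch: Whisper transcription
import whisper

# Load a Whisper model once at startup; "base" is a guess, the diff does
# not show which checkpoint the app uses.
model = whisper.load_model("base")

def transcribe_audio(audio_file):
    # model.transcribe returns a dict; "text" holds the full transcription,
    # matching the result["text"] access visible in the diff.
    result = model.transcribe(audio_file)
    return result["text"]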
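
Step 2 is the main addition of this commit: the Groq client is now created at module level, but the chat.completions.create( call inside get_groq_response is cut off at the hunk boundary. A sketch of one way the call could be completed with the Groq Python SDK; the model id, the single user message, and reading the key from an environment variable (for example a Space secret) instead of hardcoding it are assumptions:

# Hypothetical Step 2 sketch: Groq chat completion for the reply
import os
from groq import Groq

# Reading the key from an environment variable is an adjustment for
# illustration; the commit itself hardcodes a placeholder string.
client = Groq(api_key=os.environ.get("GROQ_API_KEY", "your_groq_api_key"))

def get_groq_response(text):
    chat_completion = client.chat.completions.create(
        messages=[{"role": "user", "content": text}],  # assumed message format
        model="llama3-8b-8192",                        # assumed Llama 3 model id
    )
    # The Groq SDK mirrors the OpenAI client: first choice, message content.
    return chat_completion.choices[0].message.content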
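
For Step 3, the diff shows text_to_speech saving into a NamedTemporaryFile and now returning the path so Gradio can play it. A self-contained sketch of that function; the lang="en" argument and the .mp3 suffix are additions for illustration (gTTS writes MP3 data) and are not visible in the diff:

# Hypothetical Step 3 sketch: gTTS text-to-speech returning a file path
import tempfile
from gtts import gTTS

def text_to_speech(text):
    tts = gTTS(text=text, lang="en")  # language choice is an assumption
    # The explicit .mp3 suffix (not in the diff) makes the returned path
    # easier for Gradio's audio component to handle.
    temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
    tts.save(temp_file.name)
    # Return the file path for Gradio to play the audio.
    return temp_file.name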
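
Step 4 wires everything into a Gradio interface, but the body of voice_chatbot and the gr.Interface input/output components fall outside the changed hunks, which show only live=True) and iface.launch(). A sketch of one plausible wiring, assuming the transcribe_audio, get_groq_response, and text_to_speech functions from the sketches above are in scope; the gr.Audio settings use Gradio 4.x syntax and are assumptions:

# Hypothetical Step 4 sketch: Gradio wiring (assumes transcribe_audio,
# get_groq_response, and text_to_speech from the sketches above are defined)
import gradio as gr

def voice_chatbot(audio_input):
    # Chain the three steps: speech -> text -> LLM reply -> speech.
    transcript = transcribe_audio(audio_input)
    reply = get_groq_response(transcript)
    return text_to_speech(reply)  # path to the generated MP3

iface = gr.Interface(
    fn=voice_chatbot,
    inputs=gr.Audio(sources=["microphone"], type="filepath"),  # assumed, Gradio 4.x
    outputs=gr.Audio(type="filepath"),                         # assumed
    live=True,
)

# Launch the Gradio interface
iface.launch()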