Ahsan658 committed on
Commit
c3f535c
·
verified ·
1 Parent(s): ab65634

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +50 -0
app.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import whisper
2
+ import os
3
+ from gtts import gTTS
4
+ import gradio as gr
5
+ from groq import Groq
6
+
7
# Load the Whisper speech-to-text model once at startup.
# "base" trades accuracy for speed/memory; swap for a larger checkpoint if needed.
model = whisper.load_model("base")

# SECURITY: the original code hard-coded a Groq API key here. A key committed
# to a public repo must be treated as compromised — revoke it and supply a
# fresh one through the environment instead.
GROQ_API_KEY = os.environ.get("GROQ_API_KEY", "")

# Bug fix: the client class is `Groq` (imported above); the original called
# the lowercase module name `groq(...)`, which raises
# "TypeError: 'module' object is not callable" at import time.
client = Groq(api_key=GROQ_API_KEY)
13
# Forward the user's (transcribed) text to Groq's chat-completions endpoint.
def get_llm_response(user_input):
    """Return the LLM's reply to *user_input* as plain text via the Groq API."""
    completion = client.chat.completions.create(
        model="llama3-8b-8192",  # Replace with your desired model
        messages=[{"role": "user", "content": user_input}],
    )
    reply = completion.choices[0].message.content
    return reply
21
# Speech synthesis step of the pipeline (Google Text-to-Speech).
def text_to_speech(text, output_audio="output_audio.mp3"):
    """Synthesize *text* with gTTS, save it as an MP3, and return its path."""
    speech = gTTS(text)
    speech.save(output_audio)
    return output_audio
+
27
# End-to-end voice-chat pipeline wired into the Gradio interface below.
def chatbot(audio):
    """Turn recorded audio into a spoken LLM reply.

    Pipeline:
      1. Transcribe the input audio file with Whisper.
      2. Ask the Groq LLM for a response to the transcript.
      3. Synthesize that response back into speech.

    Returns a (response_text, response_audio_path) pair for Gradio.
    """
    transcript = model.transcribe(audio)["text"]
    reply = get_llm_response(transcript)
    reply_audio = text_to_speech(reply)
    return reply, reply_audio
+
41
# Real-time Gradio UI: record (or upload) audio, display the text reply,
# and play back the synthesized answer.
iface = gr.Interface(
    fn=chatbot,
    live=True,
    inputs=gr.Audio(type="filepath"),  # Input from mic or file
    outputs=[
        gr.Textbox(),                  # Output: response text
        gr.Audio(type="filepath"),     # Output: response audio
    ],
)

# Start the web app.
iface.launch()