saima730 committed
Commit dde51c5 · verified
1 Parent(s): c412199

Update app.py

Files changed (1)
  1. app.py +32 -25
app.py CHANGED
@@ -1,6 +1,5 @@
 !pip install gradio groq openai-whisper gtts
 
-
 import os
 import gradio as gr
 from groq import Groq
@@ -9,35 +8,42 @@ from gtts import gTTS
 import tempfile
 
 # Set up Groq API key
-os.environ['GROQ_API_KEY'] = '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'
+os.environ['GROQ_API_KEY'] = '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'  # Replace with your valid key
 groq_client = Groq(api_key=os.environ.get('GROQ_API_KEY'))
 
 # Load Whisper model
 whisper_model = whisper.load_model("base")
 
 def process_audio(audio_file):
-    # Transcribe audio using Whisper
-    result = whisper_model.transcribe(audio_file)
-    user_text = result['text']
-
-    # Generate response using Llama 8b model with Groq API
-    chat_completion = groq_client.chat.completions.create(
-        messages=[
-            {
-                "role": "user",
-                "content": user_text,
-            }
-        ],
-        model="llama3-8b-8192",
-    )
-    response_text = chat_completion.choices[0].message.content
-
-    # Convert response text to speech using gTTS
-    tts = gTTS(text=response_text, lang='en')
-    audio_file = tempfile.NamedTemporaryFile(delete=False, suffix='.mp3')
-    tts.save(audio_file.name)
-
-    return response_text, audio_file.name
+    try:
+        # Transcribe audio using Whisper
+        result = whisper_model.transcribe(audio_file)
+        user_text = result['text']
+
+        # Generate response using Llama 8b model with Groq API
+        chat_completion = groq_client.chat.completions.create(
+            messages=[
+                {
+                    "role": "user",
+                    "content": user_text,
+                }
+            ],
+            model="llama3-8b-8192",
+        )
+        response_text = chat_completion.choices[0].message.content
+
+        # Convert response text to speech using gTTS
+        tts = gTTS(text=response_text, lang='en')
+
+        # Saving to a temporary file
+        audio_file = tempfile.NamedTemporaryFile(delete=False, suffix='.mp3')
+        tts.save(audio_file.name)
+
+        return response_text, audio_file.name
+
+    except Exception as e:
+        print("Error:", e)
+        return str(e), None
 
 # Create Gradio interface
 iface = gr.Interface(
@@ -47,4 +53,5 @@ iface = gr.Interface(
     live=True
 )
 
-iface.launch()
+iface.launch()
+
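
The diff's unchanged context hides the body of the gr.Interface(...) call, so the commit only shows live=True. A minimal sketch of how the surrounding pieces could fit together, assuming a file-path audio input, text-plus-audio outputs, and reading GROQ_API_KEY from the environment (for example a Space secret) rather than hard-coding it; the component choices and the key handling are assumptions, not part of this commit:

# Hypothetical sketch; process_audio is the function defined in app.py above.
import os
import gradio as gr

api_key = os.environ.get("GROQ_API_KEY")  # assumed to be set outside the source, not hard-coded

iface = gr.Interface(
    fn=process_audio,                   # transcribe -> Groq chat completion -> gTTS
    inputs=gr.Audio(type="filepath"),   # assumed: audio delivered to the function as a file path
    outputs=[gr.Textbox(label="Response"), gr.Audio(label="Spoken response")],
    live=True,                          # the only argument visible in the diff context
)

iface.launch()

Since process_audio returns (response_text, audio_file.name), the outputs list needs a text component followed by an audio component in that order.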