Kabilash10 committed
Commit 630a9eb · verified · 1 Parent(s): 5cb388e

Update app.py

Files changed (1)
  1. app.py +101 -100
app.py CHANGED
@@ -1,100 +1,101 @@
-import gradio as gr
-import requests
-import openai
-import asyncio
-from deepgram import Deepgram
-from vocode.streaming.models.transcriber import (
-    DeepgramTranscriberConfig,
-    PunctuationEndpointingConfig,
-)
-from vocode.streaming.models.agent import ChatGPTAgentConfig
-from vocode.streaming.models.message import BaseMessage
-from vocode.streaming.models.synthesizer import ElevenLabsSynthesizerConfig
-from vocode.streaming.transcriber.deepgram_transcriber import DeepgramTranscriber
-from vocode.streaming.agent.chat_gpt_agent import ChatGPTAgent
-from vocode.streaming.synthesizer.eleven_labs_synthesizer import ElevenLabsSynthesizer
-from vocode.streaming.streaming_conversation import StreamingConversation
-from vocode.helpers import create_streaming_microphone_input_and_speaker_output
-
-# Initialize API keys and voice IDs
-DEEPGRAM_API_KEY = "cc96b784b5b5b313336f4986cb29574f9e97e446"
-ELEVEN_LABS_API_KEY = "sk_b4fc90106fafc4cb986ca25c858b98b4a3aaa15b4b851d45"
-VOICE_ID = "GBv7mTt0atIp3Br8iCZE"
-OPENAI_API_KEY = "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"
-
-# Initialize OpenAI client
-client = openai.OpenAI(api_key=OPENAI_API_KEY)
-
-# Initialize Deepgram
-deepgram = Deepgram(DEEPGRAM_API_KEY)
-
-# Function to transcribe audio using Deepgram
-async def transcribe_audio(audio_file_path):
-    with open(audio_file_path, 'rb') as audio_file:
-        audio_data = audio_file.read()
-
-    response = await deepgram.transcription.prerecorded(
-        {"buffer": audio_data, "mimetype": "audio/wav"},
-        {'punctuate': True, 'language': 'en'}
-    )
-    transcription = response['results']['channels'][0]['alternatives'][0]['transcript']
-    return transcription
-
-# Function to generate content using OpenAI GPT-4
-def generate_content(input_text):
-    response = client.chat.completions.create(
-        model="gpt-4",
-        messages=[
-            {"role": "system", "content": "You are a helpful assistant."},
-            {"role": "user", "content": input_text}
-        ]
-    )
-    generated_text = response.choices[0].message.content.strip()
-    return generated_text
-
-# Function to convert text to speech using Eleven Labs
-def text_to_speech(text):
-    url = f"https://api.elevenlabs.io/v1/text-to-speech/{VOICE_ID}"
-    headers = {
-        "Accept": "audio/mpeg",
-        "Content-Type": "application/json",
-        "xi-api-key": ELEVEN_LABS_API_KEY
-    }
-    data = {
-        "text": text,
-        "voice_settings": {
-            "stability": 0.75,
-            "similarity_boost": 0.75
-        }
-    }
-    response = requests.post(url, json=data, headers=headers)
-
-    if response.status_code == 200:
-        with open("output.mp3", "wb") as f:
-            f.write(response.content)
-        return "output.mp3"
-    else:
-        return f"Error: {response.status_code} - {response.text}"
-
-# Main function to handle the entire process
-async def process_audio(audio):
-    transcription = await transcribe_audio(audio)
-    generated_text = generate_content(transcription)
-    audio_file = text_to_speech(generated_text)
-    return transcription, generated_text, audio_file
-
-# Gradio interface setup
-interface = gr.Interface(
-    fn=lambda audio: asyncio.run(process_audio(audio)),
-    inputs=gr.Audio(type="filepath", label="Speak into your microphone"),
-    outputs=[
-        gr.Textbox(label="Transcription Output"),
-        gr.Textbox(label="Generated Content"),
-        gr.Audio(label="Synthesized Speech")
-    ],
-    title="Speech-to-Text, Content Generation, and Text-to-Speech",
-    description="Speak into the microphone, and the system will transcribe your speech, generate content, and convert the generated text into speech."
-)
-
-# Launch the Gradio interface
-interface.launch()
+import gradio as gr
+import requests
+import openai
+import asyncio
+import os
+from deepgram import Deepgram
+from vocode.streaming.models.transcriber import (
+    DeepgramTranscriberConfig,
+    PunctuationEndpointingConfig,
+)
+from vocode.streaming.models.agent import ChatGPTAgentConfig
+from vocode.streaming.models.message import BaseMessage
+from vocode.streaming.models.synthesizer import ElevenLabsSynthesizerConfig
+from vocode.streaming.transcriber.deepgram_transcriber import DeepgramTranscriber
+from vocode.streaming.agent.chat_gpt_agent import ChatGPTAgent
+from vocode.streaming.synthesizer.eleven_labs_synthesizer import ElevenLabsSynthesizer
+from vocode.streaming.streaming_conversation import StreamingConversation
+from vocode.helpers import create_streaming_microphone_input_and_speaker_output
+
+# Fetch API keys and voice IDs from environment variables
+DEEPGRAM_API_KEY = os.getenv("DEEPGRAM_API_KEY")
+ELEVEN_LABS_API_KEY = os.getenv("ELEVEN_LABS_API_KEY")
+VOICE_ID = os.getenv("VOICE_ID")
+OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
+
+# Initialize OpenAI client
+client = openai.OpenAI(api_key=OPENAI_API_KEY)
+
+# Initialize Deepgram
+deepgram = Deepgram(DEEPGRAM_API_KEY)
+
+# Function to transcribe audio using Deepgram
+async def transcribe_audio(audio_file_path):
+    with open(audio_file_path, 'rb') as audio_file:
+        audio_data = audio_file.read()
+
+    response = await deepgram.transcription.prerecorded(
+        {"buffer": audio_data, "mimetype": "audio/wav"},
+        {'punctuate': True, 'language': 'en'}
+    )
+    transcription = response['results']['channels'][0]['alternatives'][0]['transcript']
+    return transcription
+
+# Function to generate content using OpenAI GPT-4
+def generate_content(input_text):
+    response = client.chat.completions.create(
+        model="gpt-4",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": input_text}
+        ]
+    )
+    generated_text = response.choices[0].message.content.strip()
+    return generated_text
+
+# Function to convert text to speech using Eleven Labs
+def text_to_speech(text):
+    url = f"https://api.elevenlabs.io/v1/text-to-speech/{VOICE_ID}"
+    headers = {
+        "Accept": "audio/mpeg",
+        "Content-Type": "application/json",
+        "xi-api-key": ELEVEN_LABS_API_KEY
+    }
+    data = {
+        "text": text,
+        "voice_settings": {
+            "stability": 0.75,
+            "similarity_boost": 0.75
+        }
+    }
+    response = requests.post(url, json=data, headers=headers)
+
+    if response.status_code == 200:
+        with open("output.mp3", "wb") as f:
+            f.write(response.content)
+        return "output.mp3"
+    else:
+        return f"Error: {response.status_code} - {response.text}"
+
+# Main function to handle the entire process
+async def process_audio(audio):
+    transcription = await transcribe_audio(audio)
+    generated_text = generate_content(transcription)
+    audio_file = text_to_speech(generated_text)
+    return transcription, generated_text, audio_file
+
+# Gradio interface setup
+interface = gr.Interface(
+    fn=lambda audio: asyncio.run(process_audio(audio)),
+    inputs=gr.Audio(type="filepath", label="Speak into your microphone"),
+    outputs=[
+        gr.Textbox(label="Transcription Output"),
+        gr.Textbox(label="Generated Content"),
+        gr.Audio(label="Synthesized Speech")
+    ],
+    title="Speech-to-Text, Content Generation, and Text-to-Speech",
+    description="Speak into the microphone, and the system will transcribe your speech, generate content, and convert the generated text into speech."
+)
+
+# Launch the Gradio interface
+interface.launch()
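
Note on the change: the updated code expects all four values to be present in the process environment, but os.getenv silently returns None for a missing variable, which would only surface later as an authentication failure inside the Deepgram, OpenAI, or ElevenLabs calls. A minimal fail-fast sketch that could replace the plain os.getenv calls (the require_env helper is illustrative, not part of this commit):

import os

def require_env(name: str) -> str:
    # Raise at startup with a clear message instead of failing later
    # with an opaque auth error from one of the downstream APIs.
    value = os.getenv(name)
    if not value:
        raise RuntimeError(f"Missing required environment variable: {name}")
    return value

DEEPGRAM_API_KEY = require_env("DEEPGRAM_API_KEY")
ELEVEN_LABS_API_KEY = require_env("ELEVEN_LABS_API_KEY")
VOICE_ID = require_env("VOICE_ID")
OPENAI_API_KEY = require_env("OPENAI_API_KEY")

On a Hugging Face Space these variables are typically supplied as repository secrets, which are exposed to the app as environment variables; for a local run, exporting them in the shell before launching the script has the same effect.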