Kabilash10 committed on
Commit e03d86e · verified · 1 Parent(s): 401ed9a

Upload app.py

Files changed (1)
  1. app.py +100 -0
app.py ADDED
@@ -0,0 +1,100 @@
+ import asyncio
+
+ import gradio as gr
+ import openai
+ import requests
+ from deepgram import Deepgram
+
+ # Initialize API keys and voice ID
+ DEEPGRAM_API_KEY = "cc96b784b5b5b313336f4986cb29574f9e97e446"
+ ELEVEN_LABS_API_KEY = "sk_b4fc90106fafc4cb986ca25c858b98b4a3aaa15b4b851d45"
+ VOICE_ID = "GBv7mTt0atIp3Br8iCZE"
+ OPENAI_API_KEY = "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"
+
+ # Initialize the OpenAI client (openai>=1.0 interface)
+ client = openai.OpenAI(api_key=OPENAI_API_KEY)
+
+ # Initialize the Deepgram client (deepgram-sdk v2 interface)
+ deepgram = Deepgram(DEEPGRAM_API_KEY)
+
+ # Transcribe an audio file using Deepgram's prerecorded API
+ async def transcribe_audio(audio_file_path):
+     with open(audio_file_path, "rb") as audio_file:
+         audio_data = audio_file.read()
+
+     response = await deepgram.transcription.prerecorded(
+         {"buffer": audio_data, "mimetype": "audio/wav"},
+         {"punctuate": True, "language": "en"},
+     )
+     return response["results"]["channels"][0]["alternatives"][0]["transcript"]
+
+ # Generate a response to the transcription using OpenAI GPT-4
+ def generate_content(input_text):
+     response = client.chat.completions.create(
+         model="gpt-4",
+         messages=[
+             {"role": "system", "content": "You are a helpful assistant."},
+             {"role": "user", "content": input_text},
+         ],
+     )
+     return response.choices[0].message.content.strip()
+
+ # Convert text to speech using the Eleven Labs REST API
+ def text_to_speech(text):
+     url = f"https://api.elevenlabs.io/v1/text-to-speech/{VOICE_ID}"
+     headers = {
+         "Accept": "audio/mpeg",
+         "Content-Type": "application/json",
+         "xi-api-key": ELEVEN_LABS_API_KEY,
+     }
+     data = {
+         "text": text,
+         "voice_settings": {
+             "stability": 0.75,
+             "similarity_boost": 0.75,
+         },
+     }
+     response = requests.post(url, json=data, headers=headers)
+
+     if response.status_code == 200:
+         with open("output.mp3", "wb") as f:
+             f.write(response.content)
+         return "output.mp3"
+     # Surface API failures in the UI instead of returning an error string
+     # where Gradio expects an audio file path.
+     raise gr.Error(f"Eleven Labs error: {response.status_code} - {response.text}")
+
+ # Main pipeline: speech -> transcription -> generated content -> speech
+ async def process_audio(audio):
+     if audio is None:
+         raise gr.Error("Please record or upload some audio first.")
+     transcription = await transcribe_audio(audio)
+     generated_text = generate_content(transcription)
+     audio_file = text_to_speech(generated_text)
+     return transcription, generated_text, audio_file
+
+ # Gradio interface setup
+ interface = gr.Interface(
+     fn=lambda audio: asyncio.run(process_audio(audio)),
+     inputs=gr.Audio(type="filepath", label="Speak into your microphone"),
+     outputs=[
+         gr.Textbox(label="Transcription Output"),
+         gr.Textbox(label="Generated Content"),
+         gr.Audio(label="Synthesized Speech"),
+     ],
+     title="Speech-to-Text, Content Generation, and Text-to-Speech",
+     description=(
+         "Speak into the microphone, and the system will transcribe your speech, "
+         "generate content, and convert the generated text into speech."
+     ),
+ )
+
+ # Launch the Gradio interface
+ interface.launch()
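
The API keys in the file above are committed in plain text. A minimal sketch of the alternative, assuming the same values are instead supplied as environment variables (for example Hugging Face Space secrets; the variable names below are illustrative and not defined by this commit):

import os

# Hypothetical replacement for the hardcoded constants in app.py above;
# assumes the keys have been added as Space secrets / environment variables.
DEEPGRAM_API_KEY = os.environ["DEEPGRAM_API_KEY"]
ELEVEN_LABS_API_KEY = os.environ["ELEVEN_LABS_API_KEY"]
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
# The voice ID is not a secret, so an inline default can be kept.
VOICE_ID = os.environ.get("ELEVEN_LABS_VOICE_ID", "GBv7mTt0atIp3Br8iCZE")

With that approach, rotating a key only requires updating the Space settings rather than committing a new app.py.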