shukdevdatta123 committed
Commit 2dd20d9 (verified)
Parent: c4bd9c2

Delete v2.txt

Files changed (1):
  1. v2.txt +0 -329
v2.txt DELETED
@@ -1,329 +0,0 @@
- import base64
- import tempfile
- import os
- import requests
- import gradio as gr
- import random
- from openai import OpenAI
-
- # Available voices for audio generation
- VOICES = ["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"]
-
- # Example audio URLs
- EXAMPLE_AUDIO_URLS = [
-     "https://cdn.openai.com/API/docs/audio/alloy.wav",
-     "https://cdn.openai.com/API/docs/audio/ash.wav",
-     "https://cdn.openai.com/API/docs/audio/coral.wav",
-     "https://cdn.openai.com/API/docs/audio/echo.wav",
-     "https://cdn.openai.com/API/docs/audio/fable.wav",
-     "https://cdn.openai.com/API/docs/audio/onyx.wav",
-     "https://cdn.openai.com/API/docs/audio/nova.wav",
-     "https://cdn.openai.com/API/docs/audio/sage.wav",
-     "https://cdn.openai.com/API/docs/audio/shimmer.wav"
- ]
-
- def process_text_input(api_key, text_prompt, selected_voice):
-     """Generate audio response from text input"""
-     try:
-         # Initialize OpenAI client with the provided API key
-         client = OpenAI(api_key=api_key)
-
-         completion = client.chat.completions.create(
-             model="gpt-4o-audio-preview",
-             modalities=["text", "audio"],
-             audio={"voice": selected_voice, "format": "wav"},
-             messages=[
-                 {
-                     "role": "user",
-                     "content": text_prompt
-                 }
-             ]
-         )
-
-         # Save the audio to a temporary file
-         wav_bytes = base64.b64decode(completion.choices[0].message.audio.data)
-         with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
-             f.write(wav_bytes)
-         temp_path = f.name
-
-         # Get the text response directly from the API
-         text_response = completion.choices[0].message.content
-
-         return text_response, temp_path
-     except Exception as e:
-         return f"Error: {str(e)}", None
-
- def process_audio_input(api_key, audio_path, text_prompt, selected_voice):
-     """Process audio input and generate a response"""
-     try:
-         if not audio_path:
-             return "Please upload or record audio first.", None
-
-         # Initialize OpenAI client with the provided API key
-         client = OpenAI(api_key=api_key)
-
-         # Read audio file and encode to base64
-         with open(audio_path, "rb") as audio_file:
-             audio_data = audio_file.read()
-         encoded_audio = base64.b64encode(audio_data).decode('utf-8')
-
-         # Create message content with both text and audio
-         message_content = []
-
-         if text_prompt:
-             message_content.append({
-                 "type": "text",
-                 "text": text_prompt
-             })
-
-         message_content.append({
-             "type": "input_audio",
-             "input_audio": {
-                 "data": encoded_audio,
-                 "format": "wav"
-             }
-         })
-
-         # Call OpenAI API
-         completion = client.chat.completions.create(
-             model="gpt-4o-audio-preview",
-             modalities=["text", "audio"],
-             audio={"voice": selected_voice, "format": "wav"},
-             messages=[
-                 {
-                     "role": "user",
-                     "content": message_content
-                 }
-             ]
-         )
-
-         # Save the audio response
-         wav_bytes = base64.b64decode(completion.choices[0].message.audio.data)
-         with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
-             f.write(wav_bytes)
-         temp_path = f.name
-
-         # Get the text response
-         text_response = completion.choices[0].message.content
-
-         return text_response, temp_path
-     except Exception as e:
-         return f"Error: {str(e)}", None
-
- def transcribe_audio(api_key, audio_path):
-     """Transcribe an audio file using OpenAI's API"""
-     try:
-         if not audio_path:
-             return "No audio file provided for transcription."
-
-         client = OpenAI(api_key=api_key)
-
-         with open(audio_path, "rb") as audio_file:
-             transcription = client.audio.transcriptions.create(
-                 model="gpt-4o-transcribe",
-                 file=audio_file
-             )
-
-         return transcription.text
-     except Exception as e:
-         return f"Transcription error: {str(e)}"
-
- def download_example_audio():
-     """Download a random example audio file for testing"""
-     try:
-         # Randomly select one of the example audio URLs
-         url = random.choice(EXAMPLE_AUDIO_URLS)
-
-         # Get the voice name from the URL for feedback
-         voice_name = url.split('/')[-1].split('.')[0]
-
-         response = requests.get(url)
-         response.raise_for_status()
-
-         # Save to a temporary file
-         with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
-             f.write(response.content)
-         temp_path = f.name
-
-         return temp_path, f"Loaded example voice: {voice_name}"
-     except Exception as e:
-         return None, f"Error loading example: {str(e)}"
-
- def use_example_audio():
-     """Load random example audio for the interface"""
-     audio_path, message = download_example_audio()
-     return audio_path, message
-
- # Create Gradio Interface
- with gr.Blocks(title="OpenAI Audio Chat App") as app:
-     gr.Markdown("# OpenAI Audio Chat App")
-     gr.Markdown("Interact with the GPT-4o audio model through text and audio inputs")
-
-     # API Key input (used across all tabs)
-     api_key = gr.Textbox(
-         label="OpenAI API Key",
-         placeholder="Enter your OpenAI API key here",
-         type="password"
-     )
-
-     with gr.Tab("Text to Audio"):
-         with gr.Row():
-             with gr.Column():
-                 text_input = gr.Textbox(
-                     label="Text Prompt",
-                     placeholder="Enter your question or prompt here...",
-                     lines=3
-                 )
-                 text_voice = gr.Dropdown(
-                     choices=VOICES,
-                     value="alloy",
-                     label="Voice"
-                 )
-                 text_submit = gr.Button("Generate Response")
-
-             with gr.Column():
-                 text_output = gr.Textbox(label="AI Response (Text / Errors)", lines=5)
-                 audio_output = gr.Audio(label="AI Response (Audio)")
-                 transcribed_output = gr.Textbox(label="Transcription of Audio Response", lines=3)
-
-         # Function to process text input and then transcribe the resulting audio
-         def text_input_with_transcription(api_key, text_prompt, voice):
-             text_response, audio_path = process_text_input(api_key, text_prompt, voice)
-
-             # Get transcription of the generated audio
-             if audio_path:
-                 transcription = transcribe_audio(api_key, audio_path)
-             else:
-                 transcription = "No audio generated to transcribe."
-
-             return text_response, audio_path, transcription
-
-         text_submit.click(
-             fn=text_input_with_transcription,
-             inputs=[api_key, text_input, text_voice],
-             outputs=[text_output, audio_output, transcribed_output]
-         )
-
-     with gr.Tab("Audio Input to Audio Response"):
-         with gr.Row():
-             with gr.Column():
-                 audio_input = gr.Audio(
-                     label="Audio Input",
-                     type="filepath",
-                     sources=["microphone", "upload"]
-                 )
-                 example_btn = gr.Button("Use Random Example Audio")
-                 example_message = gr.Textbox(label="Example Status", interactive=False)
-
-                 accompanying_text = gr.Textbox(
-                     label="Accompanying Text (Optional)",
-                     placeholder="Add any text context or question about the audio...",
-                     lines=2
-                 )
-                 audio_voice = gr.Dropdown(
-                     choices=VOICES,
-                     value="alloy",
-                     label="Response Voice"
-                 )
-                 audio_submit = gr.Button("Process Audio & Generate Response")
-
-             with gr.Column():
-                 audio_text_output = gr.Textbox(label="AI Response (Text / Errors)", lines=5)
-                 audio_audio_output = gr.Audio(label="AI Response (Audio)")
-                 audio_transcribed_output = gr.Textbox(label="Transcription of Audio Response", lines=3)
-                 input_transcription = gr.Textbox(label="Transcription of Input Audio", lines=3)
-
-         # Function to process audio input, generate a response, and provide transcriptions
-         def audio_input_with_transcription(api_key, audio_path, text_prompt, voice):
-             # First transcribe the input audio (local name avoids shadowing the Textbox above)
-             input_transcript = "N/A"
-             if audio_path:
-                 input_transcript = transcribe_audio(api_key, audio_path)
-
-             # Process the audio input and get a response
-             text_response, response_audio_path = process_audio_input(api_key, audio_path, text_prompt, voice)
-
-             # Transcribe the response audio
-             response_transcription = "No audio generated to transcribe."
-             if response_audio_path:
-                 response_transcription = transcribe_audio(api_key, response_audio_path)
-
-             return text_response, response_audio_path, response_transcription, input_transcript
-
-         audio_submit.click(
-             fn=audio_input_with_transcription,
-             inputs=[api_key, audio_input, accompanying_text, audio_voice],
-             outputs=[audio_text_output, audio_audio_output, audio_transcribed_output, input_transcription]
-         )
-
-         example_btn.click(
-             fn=use_example_audio,
-             inputs=[],
-             outputs=[audio_input, example_message]
-         )
-
-     with gr.Tab("Voice Samples"):
-         gr.Markdown("## Listen to samples of each voice")
-
-         def generate_voice_sample(api_key, voice_type):
-             try:
-                 if not api_key:
-                     return "Please enter your OpenAI API key first.", None, "No transcription available."
-
-                 client = OpenAI(api_key=api_key)
-                 completion = client.chat.completions.create(
-                     model="gpt-4o-audio-preview",
-                     modalities=["text", "audio"],
-                     audio={"voice": voice_type, "format": "wav"},
-                     messages=[
-                         {
-                             "role": "user",
-                             "content": f"This is a sample of the {voice_type} voice. It has its own unique tone and character."
-                         }
-                     ]
-                 )
-
-                 # Save the audio to a temporary file
-                 wav_bytes = base64.b64decode(completion.choices[0].message.audio.data)
-                 with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
-                     f.write(wav_bytes)
-                 temp_path = f.name
-
-                 # Get transcription
-                 transcription = transcribe_audio(api_key, temp_path)
-
-                 return f"Sample generated with voice: {voice_type}", temp_path, transcription
-             except Exception as e:
-                 return f"Error: {str(e)}", None, "No transcription available."
-
-         with gr.Row():
-             sample_voice = gr.Dropdown(
-                 choices=VOICES,
-                 value="alloy",
-                 label="Select Voice Sample"
-             )
-             sample_btn = gr.Button("Generate Sample")
-
-         with gr.Row():
-             sample_text = gr.Textbox(label="Status")
-             sample_audio = gr.Audio(label="Voice Sample")
-             sample_transcription = gr.Textbox(label="Transcription", lines=3)
-
-         sample_btn.click(
-             fn=generate_voice_sample,
-             inputs=[api_key, sample_voice],
-             outputs=[sample_text, sample_audio, sample_transcription]
-         )
-
-     gr.Markdown("""
-     ## Notes:
-     - You must provide your OpenAI API key in the field above
-     - The models used are `gpt-4o-audio-preview` for conversation and `gpt-4o-transcribe` for transcription
-     - Audio inputs should be in WAV format
-     - Available voices: alloy, ash, ballad, coral, echo, fable, onyx, nova, sage, shimmer, and verse
-     - Each audio response is automatically transcribed for verification
-     - The "Use Random Example Audio" button will load a random sample from OpenAI's demo voices
-     """)
-
- if __name__ == "__main__":
-     app.launch()
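
For reference, the request/response cycle that v2.txt wrapped in Gradio can be exercised on its own. Below is a minimal sketch of the same gpt-4o-audio-preview call pattern used throughout the deleted file; it assumes the openai package is installed and that a valid key is available in the OPENAI_API_KEY environment variable (an assumption, not part of the original app, which took the key from a textbox).

import base64
import os

from openai import OpenAI

# Same call pattern as v2.txt: request a text + audio reply, then
# decode the base64-encoded WAV payload to disk.
# Assumes OPENAI_API_KEY is set in the environment.
client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
completion = client.chat.completions.create(
    model="gpt-4o-audio-preview",
    modalities=["text", "audio"],
    audio={"voice": "alloy", "format": "wav"},
    messages=[{"role": "user", "content": "Say hello in one short sentence."}],
)

# The audio arrives base64-encoded on the message object.
with open("reply.wav", "wb") as f:
    f.write(base64.b64decode(completion.choices[0].message.audio.data))
print(completion.choices[0].message.content)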