Cyleux committed · verified
Commit 2918e4b · 1 Parent(s): 4687872

Upload folder using huggingface_hub

Files changed (4):
  1. .github/workflows/update_space.yml +28 -0
  2. README.md +2 -8
  3. requirements.txt +8 -0
  4. spaces.py +283 -0
.github/workflows/update_space.yml ADDED
@@ -0,0 +1,28 @@
+ name: Run Python script
+
+ on:
+   push:
+     branches:
+       - main
+
+ jobs:
+   build:
+     runs-on: ubuntu-latest
+
+     steps:
+       - name: Checkout
+         uses: actions/checkout@v2
+
+       - name: Set up Python
+         uses: actions/setup-python@v2
+         with:
+           python-version: '3.9'
+
+       - name: Install Gradio
+         run: python -m pip install gradio
+
+       - name: Log in to Hugging Face
+         run: python -c 'import huggingface_hub; huggingface_hub.login(token="${{ secrets.hf_token }}")'
+
+       - name: Deploy to Spaces
+         run: gradio deploy
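
Note: the login step reads a repository secret named `hf_token`; that secret presumably needs to hold a Hugging Face access token with write access to the Space so that `gradio deploy` can push updates on every push to `main`.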
README.md CHANGED
@@ -1,12 +1,6 @@
  ---
- title: OpenCall Demo Generator
- emoji: 🐠
- colorFrom: yellow
- colorTo: gray
+ title: OpenCall_Demo_Generator
+ app_file: spaces.py
  sdk: gradio
  sdk_version: 5.3.0
- app_file: app.py
- pinned: false
  ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ gradio
+ requests
+ pillow
+ numpy
+ moviepy
+ pydub
+ cairosvg
+ anthropic
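
Note: `sys` and `concurrent.futures` are part of the Python 3 standard library and are not pip-installable, so they are omitted here. Also, `spaces.py` uses `from moviepy.editor import *`, which is the moviepy 1.x module layout; moviepy 2.x removed `moviepy.editor`, so pinning `moviepy<2` may be necessary.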
spaces.py ADDED
@@ -0,0 +1,283 @@
+ import sys
+ import gradio as gr
+ import requests
+ import io
+ import base64
+ import json
+ import tempfile
+ import os
+ import numpy as np
+ import random
+ from PIL import Image as PILImage, ImageDraw, ImageFont
+ from moviepy.editor import *
+ import textwrap
+ from pydub import AudioSegment
+ import datetime
+ import cairosvg
+ import anthropic
+ import concurrent.futures
+
+ # Initialize the Anthropic client. API keys are read from the environment
+ # rather than hard-coded into a public repository; ANTHROPIC_API_KEY is the
+ # Anthropic client's default variable, ELEVENLABS_API_KEY is a name assumed here.
+ client = anthropic.Anthropic(api_key=os.environ["ANTHROPIC_API_KEY"])
+
+ # ElevenLabs API key
+ elevenlabs_api_key = os.environ["ELEVENLABS_API_KEY"]
+
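+ # Ask Claude for the conversation as JSON and return the list of turn messages.
+ # The expected response shape (illustrative example) is:
+ #   {"turns": [{"turn_number": 1, "message": "Hi, thanks for calling!..."},
+ #              {"turn_number": 2, "message": "Uhhh, yeah, hi..."}]}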
+ def get_convo_list(description):
+     prompt = f"Your task is to return a JSON object representing a complete conversation containing a key 'turns' with a value which is just a list of objects containing 'turn_number', an integer, and 'message', the message for that turn. Ensure you return as many turns as the user specifies, if they specify. Remember, each turn is a turn in a conversation between a phone agent (female) and a human (male). The phone agent should speak first. The conversation is described as:\n{description}.\nCritically, ensure that the human turns employ filler words (uh, uhhhh, ummmm, yeahhh, hm, hmm, etc with repeated letters to denote thinking...) and realistic language without using *sound effects*. I repeat, do NOT use *sound effects*. Additionally, do not over-use filler words or start every human response with them. The goal is to sound realistic, not exaggerated. The AI should be conversational, employing transition phrases. The AI should always end their response with a question except when saying goodbye. Additionally, digits should be spaced out. For instance, the human might say: 'My phone number is 5 4 8... 9 2 2 3...' instead of writing it out. They might also say 'My email is steve at gmail dot com.' where it is written out. Now provide the JSON."
+     new_output = ""
+
+     # Stream the completion and accumulate the text
+     with client.messages.stream(
+         max_tokens=8000,
+         messages=[
+             {"role": "user", "content": prompt}
+         ],
+         model="claude-3-5-sonnet-20241022",
+         temperature=0.1,
+     ) as stream:
+         for text in stream.text_stream:
+             new_output += text
+
+     # Keep only the JSON object in case the model added surrounding prose
+     first_brace = new_output.find('{')
+     last_brace = new_output.rfind('}')
+     new_output = new_output[first_brace:last_brace+1]
+     new_output = json.loads(new_output)
+     output_list = []
+     for i in new_output["turns"]:
+         output_list.append(i['message'])
+     return output_list
+
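+ # Download the SVG logo and rasterise it to a PIL image via cairosvg.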
+ def download_and_convert_svg_to_png(svg_url):
+     response = requests.get(svg_url)
+     if response.status_code == 200:
+         svg_data = response.content
+         png_data = cairosvg.svg2png(bytestring=svg_data)
+         image = PILImage.open(io.BytesIO(png_data))
+         return image
+     else:
+         print(f"Failed to download SVG image from {svg_url}")
+         return None
+
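+ # Synthesise `text` with the ElevenLabs text-to-speech API, returning raw
+ # MP3 bytes on success and None on failure.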
+ def generate_speech(text, voice_id, stability=0.8, style=0):
+     model_id = "eleven_multilingual_v2"
+     url = f"https://api.elevenlabs.io/v1/text-to-speech/{voice_id}"
+     payload = {
+         "text": text,
+         "model_id": model_id,
+         "voice_settings": {
+             "stability": stability,
+             "similarity_boost": 0.5,
+             "use_speaker_boost": True,
+             "style": style,
+         }
+     }
+     headers = {
+         "xi-api-key": elevenlabs_api_key,
+         "Accept": "audio/mpeg"
+     }
+     response = requests.post(url, json=payload, headers=headers)
+     if response.status_code == 200:
+         return response.content
+     else:
+         print(f"Error generating speech: {response.status_code} - {response.text}")
+         return None
+
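+ # Render a single conversation turn as a 1920x1080 frame: the logo sits on
+ # the left edge and the wrapped turn text is vertically centred beside it.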
+ def create_text_image(text, logo_image, text_color, image_size=(1920, 1080), bg_color="#0e2e28", font_size=70, logo_scale=0.05):
+     bg_color_rgb = PILImage.new("RGB", (1, 1), color=bg_color).getpixel((0, 0))
+     text_color_rgb = PILImage.new("RGB", (1, 1), color=text_color).getpixel((0, 0))
+
+     img = PILImage.new('RGB', image_size, color=bg_color_rgb)
+     draw = ImageDraw.Draw(img)
+
+     # Scale the logo relative to the frame height and paste it on the left
+     logo_aspect_ratio = logo_image.width / logo_image.height
+     logo_height = int(image_size[1] * logo_scale)
+     logo_width = int(logo_height * logo_aspect_ratio)
+     logo_image = logo_image.resize((logo_width, logo_height))
+     logo_position = (int(image_size[0] * 0.05), int(image_size[1] / 2 - logo_height / 2))
+     img.paste(logo_image, logo_position, logo_image.convert('RGBA'))
+
+     text_area_x = logo_position[0] + logo_width + int(image_size[0] * 0.05)
+     text_area_width = image_size[0] - text_area_x - int(image_size[0] * 0.05)
+
+     # cv2 ships a bundled DejaVuSans.ttf; fall back to PIL's default font if
+     # OpenCV is not installed or the font cannot be loaded
+     try:
+         import cv2
+         font_path = os.path.join(cv2.__path__[0], 'qt', 'fonts', 'DejaVuSans.ttf')
+         font = ImageFont.truetype(font_path, size=font_size)
+     except (ImportError, IOError):
+         font = ImageFont.load_default()
+
+     max_chars_per_line = int(text_area_width / (font_size * 0.6))
+     wrapped_text = textwrap.fill(text, width=max_chars_per_line)
+
+     # Measure the wrapped text so it can be vertically centred
+     draw_img = PILImage.new('RGB', (text_area_width, image_size[1]))
+     draw_draw = ImageDraw.Draw(draw_img)
+     try:
+         bbox = draw_draw.multiline_textbbox((0, 0), wrapped_text, font=font, align='left')
+     except AttributeError:
+         # Older Pillow versions lack multiline_textbbox
+         width, height = draw_draw.multiline_textsize(wrapped_text, font=font)
+         bbox = (0, 0, width, height)
+     text_height = bbox[3] - bbox[1]
+     text_position = (text_area_x, int((image_size[1] - text_height) / 2))
+
+     draw.multiline_text(text_position, wrapped_text, fill=text_color_rgb, font=font, align='left')
+
+     return img
+
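+ # Strip trailing silence by scanning backwards in chunk_size-millisecond
+ # steps until a chunk louder than silence_threshold (in dBFS) is found.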
+ def trim_silence_from_end(audio_segment, silence_threshold=-50.0, chunk_size=10):
+     duration_ms = len(audio_segment)
+     trim_ms = 0
+
+     while trim_ms < duration_ms:
+         start_index = duration_ms - trim_ms - chunk_size
+         if start_index < 0:
+             start_index = 0
+         chunk = audio_segment[start_index:duration_ms - trim_ms]
+         if chunk.dBFS > silence_threshold:
+             break
+         trim_ms += chunk_size
+
+     if trim_ms > 0:
+         return audio_segment[:duration_ms - trim_ms]
+     else:
+         return audio_segment
+
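+ # Trim trailing silence, optionally prepend silence_duration ms of silence,
+ # and re-export the result as 44.1 kHz WAV bytes.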
+ def add_silence_to_audio(audio_content, silence_duration=0):
+     silence = AudioSegment.silent(duration=silence_duration)
+     original_audio = AudioSegment.from_file(io.BytesIO(audio_content), format="mp3")
+     original_audio = trim_silence_from_end(original_audio)
+     new_audio = silence + original_audio
+     audio_io = io.BytesIO()
+     new_audio.export(audio_io, format="wav", parameters=["-ar", "44100"])
+     audio_io.seek(0)
+     return audio_io.read()
+
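+ # Wrap a PIL image in a fixed-duration moviepy ImageClip at the target resolution.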
+ def create_video_clip(image, duration, target_resolution=(1920, 1080)):
+     image = image.convert('RGB')
+     img_array = np.array(image)
+     clip = ImageClip(img_array)
+     clip = clip.resize(newsize=target_resolution)
+     return clip.set_duration(duration)
+
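+ # Worker for a single turn: synthesise the speech, render the matching frame,
+ # and return (video_clip, audio_clip, temp_audio_path), or (None, None, None)
+ # on failure. Even-indexed turns use the agent voice and green text with high
+ # stability; odd-indexed turns use the human voice with a livelier delivery.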
+ def process_message(args):
+     i, message, logo_image, voice_ids = args
+     voice_id = voice_ids[i % len(voice_ids)]
+
+     if i % len(voice_ids) == 0:
+         text_color = "#cdfa8a"
+         stability = 0.8
+         style = 0
+     else:
+         text_color = "#FFFFFF"
+         stability = 0.27
+         style = 0.3
+
+     try:
+         audio_content = generate_speech(message, voice_id, stability=stability, style=style)
+         if audio_content is None:
+             return (None, None, None)
+
+         audio_data = add_silence_to_audio(audio_content, silence_duration=0)
+
+         temp_audio_file = tempfile.NamedTemporaryFile(suffix=".wav", delete=False)
+         temp_audio_file.write(audio_data)
+         temp_audio_file.close()
+         temp_audio_path = temp_audio_file.name
+
+         audio_clip = AudioFileClip(temp_audio_path)
+         audio_duration = audio_clip.duration
+
+         image = create_text_image(message, logo_image, text_color, font_size=30, logo_scale=0.07)
+         video_clip = create_video_clip(image, duration=audio_duration)
+         audio_clip = audio_clip.set_duration(video_clip.duration)
+         audio_clip = audio_clip.audio_fadeout(0.2)
+         video_clip = video_clip.set_audio(audio_clip)
+
+         return (video_clip, audio_clip, temp_audio_path)
+     except Exception as e:
+         print(f"Error processing message {i+1}: {e}")
+         return (None, None, None)
+
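+ # Assemble the full video: synthesise every turn in parallel, insert a random
+ # 0.6-1.3 s pause between turns (freezing the previous frame), then
+ # concatenate everything and encode with H.264 video and AAC audio.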
+ def generate_conversation_video(messages, voice_ids, logo_url):
+     logo_image = download_and_convert_svg_to_png(logo_url)
+     if logo_image is None:
+         return None
+
+     video_clips = []
+     audio_clips = []
+     temp_audio_paths = []
+
+     args = [(i, message, logo_image, voice_ids) for i, message in enumerate(messages)]
+     max_workers = 5
+
+     with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
+         results = list(executor.map(process_message, args))
+
+     for i, (video_clip, audio_clip, temp_audio_path) in enumerate(results):
+         if video_clip and audio_clip:
+             if i > 0:
+                 gap_duration = random.uniform(0.6, 1.3)
+                 silence = AudioClip(lambda t: 0, duration=gap_duration)
+                 previous_frame = video_clips[-1].get_frame(-1)
+                 gap_clip = ImageClip(previous_frame).set_duration(gap_duration)
+                 video_clips.append(gap_clip)
+                 audio_clips.append(silence)
+
+             video_clips.append(video_clip)
+             audio_clips.append(audio_clip)
+             temp_audio_paths.append(temp_audio_path)
+         else:
+             if temp_audio_path:
+                 os.unlink(temp_audio_path)
+
+     if not video_clips or not audio_clips:
+         return None
+
+     final_audio = concatenate_audioclips(audio_clips)
+     video_clips_no_audio = [clip.without_audio() for clip in video_clips]
+     final_video = concatenate_videoclips(video_clips_no_audio, method="chain")
+     final_video = final_video.set_audio(final_audio)
+
+     temp_video_path = tempfile.mktemp(suffix='.mp4')
+     final_video.write_videofile(
+         temp_video_path,
+         fps=2,
+         codec="libx264",
+         audio_codec="aac",
+         audio_bitrate="192k",
+         temp_audiofile='temp-audio.m4a',
+         remove_temp=True,
+         verbose=False,
+         logger=None
+     )
+
+     # Cleanup
+     for clip in audio_clips:
+         clip.close()
+     for path in temp_audio_paths:
+         if os.path.exists(path):
+             os.unlink(path)
+
+     return temp_video_path
+
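+ # Gradio entry point: conversation description in, path to the rendered MP4 out.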
+ def generate_video(description):
+     voice_ids = [
+         "cgSgspJ2msm6clMCkdW9",  # First speaker
+         "roraOcl4kU2pC4JUa2Cz"   # Second speaker
+     ]
+     logo_url = "https://opencall.ai/images/logo-symbol.svg"
+
+     messages = get_convo_list(description)
+     video_path = generate_conversation_video(messages, voice_ids, logo_url)
+
+     return video_path
+
+ # Create Gradio interface
+ iface = gr.Interface(
+     fn=generate_video,
+     inputs=gr.Textbox(label="Enter conversation description"),
+     outputs=gr.Video(label="Generated Video"),
+     title="AI Conversation Video Generator",
+     description="Generate a video conversation between two speakers based on your description."
+ )
+
+ iface.launch()