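# Hugging Face Space: turns a text description into a narrated conversation video,
# using Claude for the script, ElevenLabs for speech, and moviepy for rendering.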
import gradio as gr
import requests
import io
import base64
import json
import tempfile
import os
import numpy as np
import random
from PIL import Image as PILImage, ImageDraw, ImageFont
from moviepy.editor import ImageClip, AudioClip, AudioFileClip, concatenate_audioclips, concatenate_videoclips
import textwrap
from pydub import AudioSegment
import cairosvg
import anthropic
import concurrent.futures
# Initialize Anthropic client
client = anthropic.Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))
# ElevenLabs API key
elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")
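# Fetch the available ElevenLabs voices as (name, voice_id) pairs; returns an empty list if the request fails.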
def get_voices():
url = "https://api.elevenlabs.io/v1/voices"
headers = {
"xi-api-key": elevenlabs_api_key
}
response = requests.get(url, headers=headers)
if response.status_code == 200:
voices = response.json()["voices"]
return [(voice["name"], voice["voice_id"]) for voice in voices]
return []
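# Ask Claude for the conversation as a JSON object and return the ordered list of turn messages.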
def get_convo_list(description):
    prompt = f"Your task is to return a JSON object representing a complete conversation. It must contain a key 'turns' whose value is a list of objects, each with 'turn_number' (an integer) and 'message' (the message for that turn). Return as many turns as the user specifies, if they specify a number. Each turn alternates between a phone agent (female) and a human (male) unless otherwise specified, and the phone agent speaks first unless otherwise specified. The conversation is described as:\n{description}.\nCritically, ensure that the human turns employ filler words (uh, uhhhh, ummmm, yeahhh, hm, hmm, etc., with repeated letters to denote thinking) and realistic language without using *sound effects*. I repeat, do NOT use *sound effects*. Do not over-use filler words or start every human response with them; the goal is to sound realistic, not exaggerated. The AI should be conversational, employing transition phrases, and should always end its response with a question except when saying goodbye. Additionally, digits should be spaced out. For instance, the human might say: 'My phone number is 8 3 1... 5 4 8... 9 2 2 3...' instead of writing it out as one number. They might also say 'My email is steve at gmail dot com.' where it is written out. Now provide the JSON."
    new_output = ""
with client.messages.stream(
max_tokens=8000,
messages=[
{"role": "user", "content": prompt}
],
model="claude-3-5-sonnet-20241022",
temperature=0.1,
) as stream:
for text in stream.text_stream:
new_output += text
first_brace = new_output.find('{')
last_brace = new_output.rfind('}')
new_output = new_output[first_brace:last_brace+1]
new_output = json.loads(new_output)
output_list = []
for i in new_output["turns"]:
output_list.append(i['message'])
return output_list
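# Download an SVG logo and rasterize it to a PIL image with cairosvg; returns None if the download fails.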
def download_and_convert_svg_to_png(svg_url):
response = requests.get(svg_url)
if response.status_code == 200:
svg_data = response.content
png_data = cairosvg.svg2png(bytestring=svg_data)
image = PILImage.open(io.BytesIO(png_data))
return image
else:
print(f"Failed to download SVG image from {svg_url}")
return None
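# Call the ElevenLabs text-to-speech endpoint and return the raw MP3 bytes, or None on error.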
def generate_speech(text, voice_id, stability=0.8, style=0):
model_id = "eleven_multilingual_v2"
url = f"https://api.elevenlabs.io/v1/text-to-speech/{voice_id}"
payload = {
"text": text,
"model_id": model_id,
"voice_settings": {
"stability": stability,
"similarity_boost": 0.5,
"use_speaker_boost": True,
"style": style,
}
}
headers = {
"xi-api-key": elevenlabs_api_key,
"Accept": "audio/mpeg"
}
    response = requests.post(url, json=payload, headers=headers)
if response.status_code == 200:
return response.content
else:
print(f"Error generating speech: {response.status_code} - {response.text}")
return None
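# Render one conversation turn as an image: logo on the left, wrapped text vertically centered on the right.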
def create_text_image(text, logo_image, text_color, image_size=(1920, 1080), bg_color="#0e2e28", font_size=70, logo_scale=0.05):
bg_color_rgb = PILImage.new("RGB", (1, 1), color=bg_color).getpixel((0, 0))
text_color_rgb = PILImage.new("RGB", (1, 1), color=text_color).getpixel((0, 0))
img = PILImage.new('RGB', image_size, color=bg_color_rgb)
draw = ImageDraw.Draw(img)
logo_aspect_ratio = logo_image.width / logo_image.height
logo_height = int(image_size[1] * logo_scale)
logo_width = int(logo_height * logo_aspect_ratio)
logo_image = logo_image.resize((logo_width, logo_height))
logo_position = (int(image_size[0] * 0.05), int(image_size[1] / 2 - logo_height / 2))
img.paste(logo_image, logo_position, logo_image.convert('RGBA'))
text_area_x = logo_position[0] + logo_width + int(image_size[0] * 0.05)
text_area_width = image_size[0] - text_area_x - int(image_size[0] * 0.05)
    try:
        import cv2
        font_path = os.path.join(cv2.__path__[0], 'qt', 'fonts', 'DejaVuSans.ttf')
        font = ImageFont.truetype(font_path, size=font_size)
    except (ImportError, OSError):
        # Fall back to PIL's built-in font if OpenCV or its bundled font is unavailable
        font = ImageFont.load_default()
max_chars_per_line = int(text_area_width / (font_size * 0.6))
wrapped_text = textwrap.fill(text, width=max_chars_per_line)
draw_img = PILImage.new('RGB', (text_area_width, image_size[1]))
draw_draw = ImageDraw.Draw(draw_img)
    try:
        bbox = draw_draw.multiline_textbbox((0, 0), wrapped_text, font=font, align='left')
        text_height = bbox[3] - bbox[1]
    except AttributeError:
        # Older Pillow versions have no multiline_textbbox
        _, text_height = draw_draw.multiline_textsize(wrapped_text, font=font)
text_position = (text_area_x, int((image_size[1] - text_height) / 2))
draw.multiline_text(text_position, wrapped_text, fill=text_color_rgb, font=font, align='left')
return img
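# Trim trailing silence by scanning fixed-size chunks backwards until one rises above the dBFS threshold.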
def trim_silence_from_end(audio_segment, silence_threshold=-50.0, chunk_size=10):
duration_ms = len(audio_segment)
trim_ms = 0
while trim_ms < duration_ms:
start_index = duration_ms - trim_ms - chunk_size
if start_index < 0:
start_index = 0
chunk = audio_segment[start_index:duration_ms - trim_ms]
if chunk.dBFS > silence_threshold:
break
trim_ms += chunk_size
if trim_ms > 0:
return audio_segment[:duration_ms - trim_ms]
else:
return audio_segment
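# Prepend optional silence, trim trailing silence, and re-export the speech as 44.1 kHz WAV bytes.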
def add_silence_to_audio(audio_content, silence_duration=0):
silence = AudioSegment.silent(duration=silence_duration)
original_audio = AudioSegment.from_file(io.BytesIO(audio_content), format="mp3")
original_audio = trim_silence_from_end(original_audio)
new_audio = silence + original_audio
audio_io = io.BytesIO()
new_audio.export(audio_io, format="wav", parameters=["-ar", "44100"])
audio_io.seek(0)
return audio_io.read()
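# Turn a PIL image into a fixed-duration moviepy clip at the target resolution.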
def create_video_clip(image, duration, target_resolution=(1920, 1080)):
image = image.convert('RGB')
img_array = np.array(image)
clip = ImageClip(img_array)
clip = clip.resize(newsize=target_resolution)
return clip.set_duration(duration)
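# Worker for one turn: synthesize the speech, render the text frame, and return (video_clip, audio_clip, temp_audio_path).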
def process_message(args):
i, message, logo_image, voice_ids, male_stability, male_style = args
voice_id = voice_ids[i % len(voice_ids)]
if i % len(voice_ids) == 0:
text_color = "#cdfa8a"
stability = 0.8
style = 0
else:
text_color = "#FFFFFF"
stability = male_stability
style = male_style
try:
audio_content = generate_speech(message, voice_id, stability=stability, style=style)
if audio_content is None:
return (None, None, None)
audio_data = add_silence_to_audio(audio_content, silence_duration=0)
temp_audio_file = tempfile.NamedTemporaryFile(suffix=".wav", delete=False)
temp_audio_file.write(audio_data)
temp_audio_file.close()
temp_audio_path = temp_audio_file.name
audio_clip = AudioFileClip(temp_audio_path)
audio_duration = audio_clip.duration
image = create_text_image(message, logo_image, text_color, font_size=30, logo_scale=0.07)
video_clip = create_video_clip(image, duration=audio_duration)
audio_clip = audio_clip.set_duration(video_clip.duration)
audio_clip = audio_clip.audio_fadeout(0.2)
video_clip = video_clip.set_audio(audio_clip)
return (video_clip, audio_clip, temp_audio_path)
except Exception as e:
print(f"Error processing message {i+1}: {e}")
return (None, None, None)
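# Build the full video: synthesize all turns in parallel, insert random gaps between turns, concatenate, and write the MP4.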
def generate_conversation_video(messages, voice_ids, logo_url, male_stability, male_style):
logo_image = download_and_convert_svg_to_png(logo_url)
if logo_image is None:
return None
video_clips = []
audio_clips = []
temp_audio_paths = []
args = [(i, message, logo_image, voice_ids, male_stability, male_style) for i, message in enumerate(messages)]
max_workers = 5
with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
results = list(executor.map(process_message, args))
for i, (video_clip, audio_clip, temp_audio_path) in enumerate(results):
if video_clip and audio_clip:
if i > 0:
gap_duration = random.uniform(0.6, 1.3)
                # Stereo silence with an explicit sample rate so it concatenates cleanly with the speech clips
                silence = AudioClip(lambda t: np.zeros((np.size(t), 2)), duration=gap_duration, fps=44100)
previous_frame = video_clips[-1].get_frame(-1)
gap_clip = ImageClip(previous_frame).set_duration(gap_duration)
video_clips.append(gap_clip)
audio_clips.append(silence)
video_clips.append(video_clip)
audio_clips.append(audio_clip)
temp_audio_paths.append(temp_audio_path)
else:
if temp_audio_path:
os.unlink(temp_audio_path)
if not video_clips or not audio_clips:
return None
final_audio = concatenate_audioclips(audio_clips)
video_clips_no_audio = [clip.without_audio() for clip in video_clips]
final_video = concatenate_videoclips(video_clips_no_audio, method="chain")
final_video = final_video.set_audio(final_audio)
    # mktemp is deprecated; reserve the output path via NamedTemporaryFile instead
    with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as tmp:
        temp_video_path = tmp.name
final_video.write_videofile(
temp_video_path,
fps=2,
codec="libx264",
audio_codec="aac",
audio_bitrate="192k",
temp_audiofile='temp-audio.m4a',
remove_temp=True,
verbose=False,
logger=None
)
# Cleanup
for clip in audio_clips:
clip.close()
for path in temp_audio_paths:
if os.path.exists(path):
os.unlink(path)
return temp_video_path
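# Gradio handler: generate the conversation script, then the video, and return the output file path.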
def generate_video(description, female_voice, male_voice, male_stability=0.65, male_style=0.35):
voice_ids = [
female_voice, # First speaker (female)
male_voice # Second speaker (male)
]
logo_url = "https://opencall.ai/images/logo-symbol.svg"
messages = get_convo_list(description)
video_path = generate_conversation_video(messages, voice_ids, logo_url, male_stability, male_style)
return video_path
# Get available voices
voices = get_voices()
default_female_id = "cgSgspJ2msm6clMCkdW9" # Default female voice ID
default_male_id = "3Niy6MUaDzcs7Liw7dFs" # Default male voice ID
# Fall back to the default IDs if the voice list could not be fetched (e.g. missing API key)
if not voices:
    voices = [("Default Female", default_female_id), ("Default Male", default_male_id)]
# Create voice selection dropdowns
female_voice_names = list(voices)
male_voice_names = list(voices)
# Set default selections
default_female_idx = next((i for i, v in enumerate(female_voice_names) if v[1] == default_female_id), 0)
default_male_idx = next((i for i, v in enumerate(male_voice_names) if v[1] == default_male_id), 0)
# Create Gradio interface
iface = gr.Interface(
fn=generate_video,
inputs=[
gr.Textbox(
label="Enter conversation description",
lines=5,
placeholder="Describe the conversation you want to generate...",
info="You can be specific about the number of turns, tone, and content of the conversation"
),
gr.Dropdown(
choices=female_voice_names,
value=female_voice_names[default_female_idx][1],
label="Female Voice",
type="value",
info="Select the voice for the phone agent"
),
gr.Dropdown(
choices=male_voice_names,
value=male_voice_names[default_male_idx][1],
label="Male Voice",
type="value",
info="Select the voice for the customer"
),
gr.Slider(
minimum=0.1,
maximum=1.0,
value=0.65,
label="Male Voice Stability",
info="Controls the consistency of the male voice (default: 0.65)"
),
gr.Slider(
minimum=0.1,
maximum=1.0,
value=0.35,
label="Male Voice Style",
info="Controls the expressiveness of the male voice (default: 0.35)"
)
],
outputs=gr.Video(label="Generated Video"),
title="AI Conversation Video Generator",
description="Generate a video conversation between two speakers based on your description."
)
iface.launch()