Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -3,17 +3,14 @@ import edge_tts
|
|
3 |
import asyncio
|
4 |
import tempfile
|
5 |
import os
|
6 |
-
from moviepy.editor import
|
7 |
-
from wand.image import Image
|
8 |
-
from wand.drawing import Drawing
|
9 |
-
from wand.color import Color
|
10 |
|
11 |
-
#
|
12 |
async def get_voices():
|
13 |
voices = await edge_tts.list_voices()
|
14 |
return {f"{v['ShortName']} - {v['Locale']} ({v['Gender']})": v['ShortName'] for v in voices}
|
15 |
|
16 |
-
#
|
17 |
async def text_to_speech(text, voice, rate, pitch):
|
18 |
if not text.strip():
|
19 |
return None, gr.Warning("Please enter the text to convert.")
|
@@ -29,95 +26,35 @@ async def text_to_speech(text, voice, rate, pitch):
|
|
29 |
await communicate.save(tmp_path)
|
30 |
return tmp_path, None
|
31 |
|
32 |
-
#
|
33 |
-
def text_to_video(text, voice, rate, pitch, video_width, video_height
|
34 |
-
#
|
35 |
-
|
|
|
|
|
36 |
|
37 |
-
|
38 |
-
max_chars_per_line = video_width // (text_size // 2) # 字体宽度假设为字体大小的一半
|
39 |
-
max_lines_per_page = video_height // (text_size + 15) # 10是行间距
|
40 |
-
|
41 |
-
# 按页拆分文本
|
42 |
-
words = text.split()
|
43 |
-
lines = []
|
44 |
-
current_line = ""
|
45 |
-
pages = []
|
46 |
-
|
47 |
-
for word in words:
|
48 |
-
if len(current_line) + len(word) + 1 > max_chars_per_line:
|
49 |
-
lines.append(current_line)
|
50 |
-
current_line = word
|
51 |
-
if len(lines) == max_lines_per_page:
|
52 |
-
pages.append("\n".join(lines))
|
53 |
-
lines = []
|
54 |
-
else:
|
55 |
-
current_line = f"{current_line} {word}".strip()
|
56 |
|
57 |
-
|
58 |
-
if
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
audio_clips = []
|
63 |
-
video_clips = []
|
64 |
|
65 |
-
|
66 |
-
|
67 |
-
audio_text = page.replace("\n", " ") # 移除换行符以防止 TTS 停顿
|
68 |
-
audio, warning = asyncio.run(text_to_speech(audio_text, voice, rate, pitch))
|
69 |
-
if warning:
|
70 |
-
return None, warning
|
71 |
-
audio_clip = AudioFileClip(audio)
|
72 |
-
audio_clips.append(audio_clip)
|
73 |
-
|
74 |
-
# 使用 wand 生成视频片段
|
75 |
-
with Drawing() as draw:
|
76 |
-
draw.font = font_path
|
77 |
-
draw.font_size = text_size
|
78 |
-
draw.fill_color = Color(text_color)
|
79 |
-
draw.text_alignment = 'center'
|
80 |
-
draw.text_interline_spacing = 10
|
81 |
-
|
82 |
-
with Image(width=video_width, height=video_height, background=Color('transparent')) as img:
|
83 |
-
lines = page.split("\n")
|
84 |
-
# Centering text vertically
|
85 |
-
total_text_height = len(lines) * (text_size + 10) # Height of text area
|
86 |
-
start_y = (video_height - total_text_height) // 2 # Start position to center vertically
|
87 |
-
|
88 |
-
for j, line in enumerate(lines):
|
89 |
-
draw.text(int(video_width / 2), start_y + (j * (text_size + 10)), line)
|
90 |
-
|
91 |
-
draw(img) # Apply the drawing to the image
|
92 |
-
img.format = 'png'
|
93 |
-
img_path = os.path.join(tempfile.gettempdir(), f"page_{i}.png")
|
94 |
-
img.save(filename=img_path)
|
95 |
-
text_clip = ImageClip(img_path).set_duration(audio_clip.duration).set_audio(audio_clip)
|
96 |
-
video_clips.append(text_clip)
|
97 |
-
|
98 |
-
# 合并所有视频片段
|
99 |
-
final_video = concatenate_videoclips(video_clips)
|
100 |
-
|
101 |
-
# If a background image is provided, create a CompositeVideoClip
|
102 |
-
if bg_image:
|
103 |
-
bg_clip = ImageClip(bg_image).set_duration(final_video.duration).resize(newsize=(video_width, video_height))
|
104 |
-
final_video = CompositeVideoClip([bg_clip, final_video.set_position("center")])
|
105 |
|
106 |
final_video_path = os.path.join(tempfile.gettempdir(), "output_video.mp4")
|
107 |
final_video.write_videofile(final_video_path, fps=24, codec="libx264")
|
|
|
108 |
return final_video_path, None
|
109 |
|
110 |
-
# Gradio
|
111 |
-
def tts_interface(text, voice, rate, pitch, video_width, video_height
|
112 |
-
|
113 |
-
if bg_image is not None:
|
114 |
-
video, warning = text_to_video(text, voice, rate, pitch, video_width, video_height, bg_image.name, text_color, text_font, text_size)
|
115 |
-
else:
|
116 |
-
# Pass None for the bg_image if not uploaded
|
117 |
-
video, warning = text_to_video(text, voice, rate, pitch, video_width, video_height, None, text_color, text_font, text_size)
|
118 |
return None, video, warning
|
119 |
|
120 |
-
#
|
121 |
async def create_demo():
|
122 |
voices = await get_voices()
|
123 |
|
@@ -128,12 +65,9 @@ async def create_demo():
|
|
128 |
gr.Dropdown(choices=[""] + list(voices.keys()), label="Select Voice", value=""),
|
129 |
gr.Slider(minimum=-50, maximum=50, value=0, label="Rate Adjustment (%)", step=1),
|
130 |
gr.Slider(minimum=-20, maximum=20, value=0, label="Pitch Adjustment (Hz)", step=1),
|
|
|
131 |
gr.Slider(minimum=640, maximum=1920, value=1080, label="Video Width", step=10),
|
132 |
gr.Slider(minimum=480, maximum=1080, value=720, label="Video Height", step=10),
|
133 |
-
gr.File(label="Upload Background Image or Video", type="filepath"), # Changed type to 'filepath'
|
134 |
-
gr.ColorPicker(value="#FFFFFF", label="Text Color"),
|
135 |
-
gr.Textbox(label="Text Font", value="msyh.ttf"), # Please ensure the font file path is correct
|
136 |
-
gr.Slider(minimum=10, maximum=100, value=24, label="Text Size", step=1)
|
137 |
],
|
138 |
outputs=[
|
139 |
gr.Audio(label="Generated Audio", type="filepath"),
|
@@ -141,14 +75,14 @@ async def create_demo():
|
|
141 |
gr.Markdown(label="Warning", visible=False)
|
142 |
],
|
143 |
title="Edge TTS Text to Speech and Video",
|
144 |
-
description="Convert text to speech and video using Microsoft Edge TTS.
|
145 |
analytics_enabled=False,
|
146 |
allow_flagging=False,
|
147 |
)
|
148 |
|
149 |
return demo
|
150 |
|
151 |
-
#
|
152 |
if __name__ == "__main__":
|
153 |
demo = asyncio.run(create_demo())
|
154 |
demo.launch()
|
|
|
3 |
import asyncio
|
4 |
import tempfile
|
5 |
import os
|
6 |
+
from moviepy.editor import concatenate_videoclips, AudioFileClip, ImageClip, VideoFileClip
|
|
|
|
|
|
|
7 |
|
8 |
+
# Get all available voices
async def get_voices():
    """Fetch every Edge TTS voice and key it by a descriptive label.

    Returns a dict mapping "ShortName - Locale (Gender)" labels to the
    voice ShortName expected by edge_tts.Communicate.
    """
    catalog = {}
    for voice in await edge_tts.list_voices():
        label = f"{voice['ShortName']} - {voice['Locale']} ({voice['Gender']})"
        catalog[label] = voice['ShortName']
    return catalog
|
12 |
|
13 |
+
# Text-to-speech function
|
14 |
async def text_to_speech(text, voice, rate, pitch):
|
15 |
if not text.strip():
|
16 |
return None, gr.Warning("Please enter the text to convert.")
|
|
|
26 |
await communicate.save(tmp_path)
|
27 |
return tmp_path, None
|
28 |
|
29 |
+
# Text-to-video function
def text_to_video(text, voice, rate, pitch, bg_media, video_width, video_height):
    """Render `bg_media` as a video with a TTS narration of `text`.

    Args:
        text: The text to narrate.
        voice: Edge TTS voice ShortName.
        rate / pitch: TTS rate (%) and pitch (Hz) adjustments.
        bg_media: Path to a background image or video file (from gr.File;
            may be None when the user uploads nothing).
        video_width / video_height: Output frame size in pixels.

    Returns:
        (path_to_mp4, None) on success, or (None, warning) on failure.
    """
    # gr.File yields None when no file was uploaded; the bare
    # bg_media.endswith(...) below would raise AttributeError in that case.
    if not bg_media:
        return None, gr.Warning("Please upload a background image or video.")

    # Generate narration audio from the text.
    audio, warning = asyncio.run(text_to_speech(text, voice, rate, pitch))
    if warning:
        return None, warning

    audio_clip = AudioFileClip(audio)

    # Create background video or image. Match common video extensions
    # case-insensitively: the previous check only matched a lowercase
    # ".mp4", so ".MP4"/".mov"/".webm" uploads were mis-loaded as images.
    if bg_media.lower().endswith(('.mp4', '.mov', '.avi', '.mkv', '.webm')):
        bg_clip = VideoFileClip(bg_media).resize(newsize=(video_width, video_height)).set_duration(audio_clip.duration)
    else:
        bg_clip = ImageClip(bg_media).set_duration(audio_clip.duration).resize(newsize=(video_width, video_height))

    # Attach the narration to the background and render to a temp file.
    final_video = bg_clip.set_audio(audio_clip)

    final_video_path = os.path.join(tempfile.gettempdir(), "output_video.mp4")
    final_video.write_videofile(final_video_path, fps=24, codec="libx264")

    return final_video_path, None
|
51 |
|
52 |
+
# Gradio interface function
def tts_interface(text, voice, rate, pitch, bg_media, video_width, video_height):
    """Bridge Gradio inputs to text_to_video.

    The first output slot (audio) is always None; the video path and any
    warning from text_to_video fill the remaining slots.
    """
    video_path, warning = text_to_video(
        text, voice, rate, pitch, bg_media, video_width, video_height
    )
    return None, video_path, warning
|
56 |
|
57 |
+
# Create Gradio app
|
58 |
async def create_demo():
|
59 |
voices = await get_voices()
|
60 |
|
|
|
65 |
gr.Dropdown(choices=[""] + list(voices.keys()), label="Select Voice", value=""),
|
66 |
gr.Slider(minimum=-50, maximum=50, value=0, label="Rate Adjustment (%)", step=1),
|
67 |
gr.Slider(minimum=-20, maximum=20, value=0, label="Pitch Adjustment (Hz)", step=1),
|
68 |
+
gr.File(label="Upload Background Image or Video", type="filepath"),
|
69 |
gr.Slider(minimum=640, maximum=1920, value=1080, label="Video Width", step=10),
|
70 |
gr.Slider(minimum=480, maximum=1080, value=720, label="Video Height", step=10),
|
|
|
|
|
|
|
|
|
71 |
],
|
72 |
outputs=[
|
73 |
gr.Audio(label="Generated Audio", type="filepath"),
|
|
|
75 |
gr.Markdown(label="Warning", visible=False)
|
76 |
],
|
77 |
title="Edge TTS Text to Speech and Video",
|
78 |
+
description="Convert text to speech and video using Microsoft Edge TTS. Upload an image or video for the background.",
|
79 |
analytics_enabled=False,
|
80 |
allow_flagging=False,
|
81 |
)
|
82 |
|
83 |
return demo
|
84 |
|
85 |
+
# Run the application
if __name__ == "__main__":
    # create_demo is async (it awaits the voice listing), so drive it
    # with asyncio.run before launching the Gradio app.
    app = asyncio.run(create_demo())
    app.launch()
|