Update app.py
app.py
CHANGED
@@ -3,17 +3,17 @@ import edge_tts
 import asyncio
 import tempfile
 import os
-from moviepy.editor import
+from moviepy.editor import concatenate_videoclips, AudioFileClip, ImageClip
 from wand.image import Image
 from wand.drawing import Drawing
 from wand.color import Color
 
-#
+# Function to get available voices
 async def get_voices():
     voices = await edge_tts.list_voices()
     return {f"{v['ShortName']} - {v['Locale']} ({v['Gender']})": v['ShortName'] for v in voices}
 
-#
+# Text-to-Speech function
 async def text_to_speech(text, voice, rate, pitch):
     if not text.strip():
         return None, gr.Warning("Please enter the text to convert.")
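The body of `text_to_speech` between this hunk and the next is unchanged and therefore not shown. For orientation, here is a minimal sketch of the edge-tts call it presumably wraps; the helper name `synthesize_to_file` and the integer-to-offset formatting are assumptions on my part, and the `pitch` keyword needs a reasonably recent edge-tts release:

```python
import tempfile

import edge_tts

async def synthesize_to_file(text: str, voice: str, rate: int, pitch: int) -> str:
    # edge-tts expects signed offset strings such as "+10%" and "-5Hz".
    communicate = edge_tts.Communicate(
        text,
        voice,
        rate=f"{rate:+d}%",
        pitch=f"{pitch:+d}Hz",
    )
    # Reserve a temporary MP3 path, then write the synthesized speech to it.
    with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as tmp:
        tmp_path = tmp.name
    await communicate.save(tmp_path)
    return tmp_path
```

In the app this coroutine would be awaited, or driven with `asyncio.run` as `text_to_video` does below.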
@@ -29,80 +29,64 @@ async def text_to_speech(text, voice, rate, pitch):
         await communicate.save(tmp_path)
     return tmp_path, None
 
-#
+# Text-to-Video function
 def text_to_video(text, voice, rate, pitch, video_width, video_height, bg_color, text_color, text_font, text_size):
-    #
+    # Ensure the font file exists, else use a default font
     font_path = os.path.abspath(text_font) if os.path.exists(text_font) else "Arial"
 
-    #
-    max_chars_per_line = video_width // (text_size // 2)  # assume each glyph is half the font size wide
-    max_lines_per_page = video_height // (text_size + 15)  # 10 is the line spacing
-
-    # Split the text into pages
+    # Split text into lines
     words = text.split()
     lines = []
     current_line = ""
-    pages = []
 
     for word in words:
-        if len(current_line) + len(word) + 1 >
+        if len(current_line) + len(word) + 1 > (video_width // (text_size // 2)):
             lines.append(current_line)
             current_line = word
-            if len(lines) == max_lines_per_page:
-                pages.append("\n".join(lines))
-                lines = []
         else:
            current_line = f"{current_line} {word}".strip()
 
-
-
-    pages.append("\n".join(lines))
+    if current_line:
+        lines.append(current_line)
 
-    # Generate a separate audio clip for each page
     audio_clips = []
     video_clips = []
-
-
-
-        audio, warning = asyncio.run(text_to_speech(audio_text, voice, rate, pitch))
+
+    for line in lines:
+        audio, warning = asyncio.run(text_to_speech(line, voice, rate, pitch))
         if warning:
             return None, warning
         audio_clip = AudioFileClip(audio)
         audio_clips.append(audio_clip)
 
-        #
+        # Create an image for each line of text
         with Drawing() as draw:
             draw.font = font_path
             draw.font_size = text_size
             draw.fill_color = Color(text_color)
-            draw.text_alignment = 'center'
-            draw.text_interline_spacing = 10
             with Image(width=video_width, height=video_height, background=Color(bg_color)) as img:
-
-
-
-
-
-
-                draw(img)  # Correctly draw the text onto the image
-                img.format = 'png'
-                img_path = os.path.join(tempfile.gettempdir(), f"page_{i}.png")
+                # Draw the text in the center
+                text_width = draw.get_text_dimensions(line)[0]
+                y_position = (video_height - text_size) // 2  # Center vertically
+                draw.text((video_width - text_width) // 2, y_position, line)  # Center horizontally
+                draw(img)
+                img_path = os.path.join(tempfile.gettempdir(), f"{line}.png")
                 img.save(filename=img_path)
                 text_clip = ImageClip(img_path).set_duration(audio_clip.duration).set_audio(audio_clip)
                 video_clips.append(text_clip)
 
-    #
+    # Combine all video clips
     final_video = concatenate_videoclips(video_clips)
     final_video_path = os.path.join(tempfile.gettempdir(), "output_video.mp4")
     final_video.write_videofile(final_video_path, fps=24, codec="libx264")
     return final_video_path, None
 
-# Gradio
+# Gradio interface function
 def tts_interface(text, voice, rate, pitch, video_width, video_height, bg_color, text_color, text_font, text_size):
     video, warning = text_to_video(text, voice, rate, pitch, video_width, video_height, bg_color, text_color, text_font, text_size)
     return None, video, warning
 
-#
+# Create Gradio app
 async def create_demo():
     voices = await get_voices()
 
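Two caveats about the rendering lines added above. `Drawing.get_text_dimensions` is not, as far as I know, part of Wand's API; text is normally measured with `Drawing.get_font_metrics(image, text)`. And using the raw line as the PNG filename breaks on text containing slashes or other characters the filesystem rejects. A minimal sketch of the same per-line rendering under those assumptions; `render_line_image` and the index-based filename are illustrative, not code from this Space:

```python
import os
import tempfile

from wand.color import Color
from wand.drawing import Drawing
from wand.image import Image

def render_line_image(line, index, width, height, bg_color, text_color, font_path, font_size):
    """Render one line of text roughly centered on a solid background and return the PNG path."""
    with Drawing() as draw, Image(width=width, height=height, background=Color(bg_color)) as img:
        draw.font = font_path
        draw.font_size = font_size
        draw.fill_color = Color(text_color)
        # Wand reports text dimensions via get_font_metrics(image, text).
        metrics = draw.get_font_metrics(img, line)
        x = max(int((width - metrics.text_width) // 2), 0)
        y = max(int((height + metrics.text_height) // 2), 0)  # y is the text baseline
        draw.text(x, y, line)
        draw(img)
        # An index-based name avoids characters in `line` that are illegal in filenames.
        img_path = os.path.join(tempfile.gettempdir(), f"page_{index}.png")
        img.save(filename=img_path)
    return img_path
```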
@@ -117,7 +101,7 @@ async def create_demo():
             gr.Slider(minimum=480, maximum=1080, value=720, label="Video Height", step=10),
             gr.ColorPicker(value="#000000", label="Background Color"),
             gr.ColorPicker(value="#FFFFFF", label="Text Color"),
-            gr.Textbox(label="Text Font", value="
+            gr.Textbox(label="Text Font", value="Arial"),  # Default to Arial for testing
             gr.Slider(minimum=10, maximum=100, value=24, label="Text Size", step=1)
         ],
         outputs=[
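One note on the new "Arial" default above: ImageMagick can only resolve a bare font name if that font is installed, which is often not the case in the Linux containers Spaces run on. Below is a hypothetical helper in the spirit of the existing `font_path` line, assuming a `.ttf` file bundled next to `app.py` as the fallback; `DEFAULT_FONT` and `resolve_font` are my names, not the app's:

```python
import os

DEFAULT_FONT = "DejaVuSans.ttf"  # assumed to be bundled alongside app.py

def resolve_font(text_font: str) -> str:
    """Prefer an existing font file; otherwise fall back to a bundled .ttf."""
    if text_font and os.path.exists(text_font):
        return os.path.abspath(text_font)
    if os.path.exists(DEFAULT_FONT):
        return os.path.abspath(DEFAULT_FONT)
    # Last resort: hand the raw name to ImageMagick and hope the font is installed.
    return text_font or "Arial"
```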
@@ -133,7 +117,7 @@ async def create_demo():
 
     return demo
 
-#
+# Run the app
 if __name__ == "__main__":
     demo = asyncio.run(create_demo())
-    demo.launch(share=True)
+    demo.launch(share=True)
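For reference, the assembly that the new `text_to_video` performs with the moviepy 1.x API imported above reduces to the pattern below; `assemble_video` and the `(image_path, audio_path)` pairing are illustrative names, not part of the app:

```python
from moviepy.editor import AudioFileClip, ImageClip, concatenate_videoclips

def assemble_video(segments, output_path, fps=24):
    """segments: iterable of (image_path, audio_path) pairs, one per line of narrated text."""
    clips = []
    for image_path, audio_path in segments:
        audio = AudioFileClip(audio_path)
        # Show each still image for exactly as long as its narration lasts.
        clips.append(ImageClip(image_path).set_duration(audio.duration).set_audio(audio))
    final = concatenate_videoclips(clips)
    final.write_videofile(output_path, fps=fps, codec="libx264")
    return output_path
```

Fed with the per-line PNGs and MP3s produced earlier, this yields the same kind of output_video.mp4 that `write_videofile` produces in the diff.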