Update app.py
app.py (CHANGED)
--- a/app.py
@@ -1,6 +1,5 @@
-from
 import streamlit as st
-from moviepy.editor import VideoFileClip, concatenate_videoclips, AudioFileClip
 import cv2
 import base64
 import io
@@ -9,7 +8,7 @@ import os
 import requests
 import tempfile

-# Load environment variables
 load_dotenv('.env.local')

 def check_password():
@@ -17,7 +16,6 @@ def check_password():
     if correct_password is None:
         st.error("Password is not set in .env.local")
         return False
-
     user_password = st.text_input("Enter the password to proceed", type="password")
     if user_password == correct_password:
         return True
@@ -30,7 +28,7 @@ def video_to_frames(video_file, frame_sampling_rate=1):
     with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as tmpfile:
         tmpfile.write(video_file.read())
         video_filename = tmpfile.name
-
     video_clip = VideoFileClip(video_filename)
     video_duration = video_clip.duration
     fps = video_clip.fps
@@ -39,7 +37,7 @@ def video_to_frames(video_file, frame_sampling_rate=1):
     video = cv2.VideoCapture(video_filename)
     base64Frame = []
     current_frame = 0
-
     while video.isOpened():
         success, frame = video.read()
         if not success:
@@ -50,7 +48,6 @@ def video_to_frames(video_file, frame_sampling_rate=1):
         current_frame += 1

     video.release()
-    print(f"{len(base64Frame)} frames read at a sampling rate of {frame_sampling_rate} second(s) per frame.")
     return base64Frame, video_filename, video_duration

 def frames_to_story(base64Frames, prompt, api_key):
@@ -59,7 +56,7 @@ def frames_to_story(base64Frames, prompt, api_key):
             "role": "user",
             "content": [
                 prompt,
-                *map(lambda x: {"image": x, "resize": 768}, base64Frames
             ],
         },
     ]
@@ -71,47 +68,35 @@ def frames_to_story(base64Frames, prompt, api_key):
         "max_tokens": 700,
     }
     result = openai.ChatCompletion.create(**params)
-    print(result.choices[0].message.content)
     return result.choices[0].message.content

 def text_to_audio(text, api_key, voice):
     response = requests.post(
         "https://api.openai.com/v1/audio/speech",
-        headers={
-
-        },
-        json={
-            "model": "tts-1",
-            "input": text,
-            "voice": voice,
-        },
     )
-
     if response.status_code != 200:
         raise Exception("Request failed with status code")
-
-    audio_bytes_io = io.BytesIO()
-    for chunk in response.iter_content(chunk_size=1024*1024):
-        audio_bytes_io.write(chunk)
-    audio_bytes_io.seek(0)
-
     with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmpfile:
-
-            tmpfile.write(chunk)
         audio_filename = tmpfile.name
-
-    return audio_filename
-
-def merge_audio_video(video_filename, audio_filename, output_filename):
     video_clip = VideoFileClip(video_filename)
     audio_clip = AudioFileClip(audio_filename)
-
     if audio_clip.duration > video_clip.duration:
-
-        extra_duration = audio_clip.duration - video_clip.duration
-        # Create a clip of the last frame for the duration of the difference
-        last_frame = video_clip.subclip(video_clip.duration - 1).to_ImageClip(duration=extra_duration)
-        # Concatenate the last frame clip to the end of the original video clip
         video_clip = concatenate_videoclips([video_clip, last_frame])

     final_clip = video_clip.set_audio(audio_clip)
@@ -121,58 +106,32 @@ def merge_audio_video(video_filename, audio_filename, output_filename):

     return output_filename

-
-
-# def merge_audio_video(video_filename, audio_filename, output_filename):
-#     print("Merging audio and video ...")
-#     video_clip = VideoFileClip(video_filename)
-#     audio_clip = AudioFileClip(audio_filename)
-#     final_clip = video_clip.set_audio(audio_clip)
-#     final_clip.write_videofile(output_filename, codec='libx264', audio_codec="aac")
-#     video_clip.close()
-#     audio_clip.close()
-#     return output_filename
-
-
 def main():
     st.set_page_config(page_title="AI Voiceover", page_icon="🔮")
     st.title("Pixio Video to Voiceover 🎥🔮")

     if not check_password():
         return
-
     openai_key = os.getenv('OPENAI_API_KEY')
     if not openai_key:
         st.error("OpenAI API key is not set in .env.local")
         return
-
     uploaded_file = st.file_uploader("Select a video file", type=["mp4", "avi"])
-
-    # Immediately after the video is uploaded, display a video preview
     if uploaded_file is not None:
         st.video(uploaded_file)
-
-
-
-
-        'Onyx (Male)': 'onyx',
-        'Nova (Female)': 'nova',
-        'Shimmer (Female)': 'shimmer',
-        'Alloy (Female)': 'alloy'
-    }
     option = st.selectbox('Choose the voice you want', list(voice_options.keys()))
     classify = voice_options[option]

-    duration_options = list(range(10, 121, 10))
     selected_duration = st.selectbox('Select the desired video duration (seconds)', duration_options)

-    script_type_options = {
-        'Product Tutorial': 'Product Tutorial',
-        'TikTok': 'TikTok',
-        'YouTube Short': 'YouTube Short',
-        'Website Tutorial': 'Website Tutorial',
-        'General Info': 'General Info'
-    }
     selected_script_type = st.selectbox('Choose the script generator type', list(script_type_options.keys()))

     # Define unique prompt templates for each script type, including the dynamic content for "Product Tutorial"
@@ -191,33 +150,237 @@ def main():
         'Website Tutorial': "Generate a short voiceover that is approximately {selected_duration} seconds long.Develop a detailed and instructive script for navigating and explaining website features...",
         'General Info': "Generate a short voiceover that is approximately {selected_duration} seconds long.Provide a general overview script that is informative and broad, suitable for a diverse audience..."
     }

-    # Generate the initial prompt based on selected script type
-    initial_prompt = script_templates[selected_script_type]

-
-

-
-
-

-
-
-

-
-

-
-

-
-

-
-
-

-if __name__ == "__main__":
-
+++ b/app.py
@@ -1,6 +1,5 @@
+from moviepy.editor import VideoFileClip, concatenate_videoclips, AudioFileClip, CompositeAudioClip
 import streamlit as st
 import cv2
 import base64
 import io
@@ -9,7 +8,7 @@ import os
 import requests
 import tempfile

+# Load environment variables
 load_dotenv('.env.local')

 def check_password():
@@ -17,7 +16,6 @@ def check_password():
     if correct_password is None:
         st.error("Password is not set in .env.local")
         return False
     user_password = st.text_input("Enter the password to proceed", type="password")
     if user_password == correct_password:
         return True
@@ -30,7 +28,7 @@ def video_to_frames(video_file, frame_sampling_rate=1):
     with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as tmpfile:
         tmpfile.write(video_file.read())
         video_filename = tmpfile.name
+
     video_clip = VideoFileClip(video_filename)
     video_duration = video_clip.duration
     fps = video_clip.fps
@@ -39,7 +37,7 @@ def video_to_frames(video_file, frame_sampling_rate=1):
     video = cv2.VideoCapture(video_filename)
     base64Frame = []
     current_frame = 0
+
     while video.isOpened():
         success, frame = video.read()
         if not success:
@@ -50,7 +48,6 @@ def video_to_frames(video_file, frame_sampling_rate=1):
         current_frame += 1

     video.release()
     return base64Frame, video_filename, video_duration

 def frames_to_story(base64Frames, prompt, api_key):
@@ -59,7 +56,7 @@ def frames_to_story(base64Frames, prompt, api_key):
             "role": "user",
             "content": [
                 prompt,
+                *map(lambda x: {"image": x, "resize": 768}, base64Frames),
             ],
         },
     ]
@@ -71,47 +68,35 @@ def frames_to_story(base64Frames, prompt, api_key):
         "max_tokens": 700,
     }
     result = openai.ChatCompletion.create(**params)
     return result.choices[0].message.content

 def text_to_audio(text, api_key, voice):
     response = requests.post(
         "https://api.openai.com/v1/audio/speech",
+        headers={"Authorization": f"Bearer {api_key}"},
+        json={"model": "tts-1", "input": text, "voice": voice},
     )
+
     if response.status_code != 200:
         raise Exception("Request failed with status code")
+
+    audio_bytes_io = io.BytesIO(response.content)
     with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmpfile:
+        tmpfile.write(audio_bytes_io.read())
         audio_filename = tmpfile.name
+
+    return audio_filename
+
+def merge_audio_video(video_filename, audio_filename, output_filename, overlay_audio_file=None):
     video_clip = VideoFileClip(video_filename)
     audio_clip = AudioFileClip(audio_filename)
+
+    if overlay_audio_file:
+        overlay_clip = AudioFileClip(overlay_audio_file.name).volumex(0.2)
+        audio_clip = CompositeAudioClip([audio_clip, overlay_clip.set_duration(audio_clip.duration)])
+
     if audio_clip.duration > video_clip.duration:
+        last_frame = video_clip.to_ImageClip(t=video_clip.duration-1).set_duration(audio_clip.duration - video_clip.duration)
         video_clip = concatenate_videoclips([video_clip, last_frame])

     final_clip = video_clip.set_audio(audio_clip)
@@ -121,58 +106,32 @@ def merge_audio_video(video_filename, audio_filename, output_filename):

     return output_filename

 def main():
     st.set_page_config(page_title="AI Voiceover", page_icon="🔮")
     st.title("Pixio Video to Voiceover 🎥🔮")

     if not check_password():
         return
+
     openai_key = os.getenv('OPENAI_API_KEY')
     if not openai_key:
         st.error("OpenAI API key is not set in .env.local")
         return
+
     uploaded_file = st.file_uploader("Select a video file", type=["mp4", "avi"])
     if uploaded_file is not None:
         st.video(uploaded_file)
+
+    overlay_audio_file = st.file_uploader("Upload overlay audio (optional)", type=["mp3", "wav"])
+
+    voice_options = {'Echo (Male)': 'echo', 'Fable (Male)': 'fable', 'Onyx (Male)': 'onyx', 'Nova (Female)': 'nova', 'Shimmer (Female)': 'shimmer', 'Alloy (Female)': 'alloy'}
     option = st.selectbox('Choose the voice you want', list(voice_options.keys()))
     classify = voice_options[option]

+    duration_options = list(range(10, 121, 10)) # 10 to 120 seconds, in 10-second intervals
     selected_duration = st.selectbox('Select the desired video duration (seconds)', duration_options)

+    script_type_options = {'Product Tutorial': 'Product Tutorial', 'TikTok': 'TikTok', 'YouTube Short': 'YouTube Short', 'Website Tutorial': 'Website Tutorial', 'General Info': 'General Info'}
     selected_script_type = st.selectbox('Choose the script generator type', list(script_type_options.keys()))

     # Define unique prompt templates for each script type, including the dynamic content for "Product Tutorial"
@@ -191,33 +150,237 @@ def main():
         'Website Tutorial': "Generate a short voiceover that is approximately {selected_duration} seconds long.Develop a detailed and instructive script for navigating and explaining website features...",
         'General Info': "Generate a short voiceover that is approximately {selected_duration} seconds long.Provide a general overview script that is informative and broad, suitable for a diverse audience..."
     }
+    prompt = "Customize your prompt based on the script type selected above and additional features added."
+
+    if uploaded_file is not None and st.button("START PROCESSING"):
+        with st.spinner("Processing..."):
+            base64Frame, video_filename, video_duration = video_to_frames(uploaded_file, 1)
+            text = frames_to_story(base64Frame, prompt, openai_key)
+            audio_filename = text_to_audio(text, openai_key, classify)
+            output_video_filename = os.path.splitext(video_filename)[0] + "_output.mp4"
+            final_video_filename = merge_audio_video(video_filename, audio_filename, output_video_filename, overlay_audio_file)
+            st.video(final_video_filename)
+
+            os.remove(video_filename)
+            os.remove(audio_filename)
+            os.remove(final_video_filename)
+
+if __name__ == "__main__":
+    main()


+# from dotenv import load_dotenv
+# import streamlit as st
+# from moviepy.editor import VideoFileClip, concatenate_videoclips, AudioFileClip
+# import cv2
+# import base64
+# import io
+# import openai
+# import os
+# import requests
+# import tempfile

+# Load environment variables from .env.local
+# load_dotenv('.env.local')
+
+# def check_password():
+#     correct_password = os.getenv('PASSWORD')
+#     if correct_password is None:
+#         st.error("Password is not set in .env.local")
+#         return False
+
+#     user_password = st.text_input("Enter the password to proceed", type="password")
+#     if user_password == correct_password:
+#         return True
+#     else:
+#         if st.button("Check Password"):
+#             st.error("Incorrect password")
+#         return False
+
+# def video_to_frames(video_file, frame_sampling_rate=1):
+#     with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as tmpfile:
+#         tmpfile.write(video_file.read())
+#         video_filename = tmpfile.name
+
+#     video_clip = VideoFileClip(video_filename)
+#     video_duration = video_clip.duration
+#     fps = video_clip.fps
+#     frames_to_skip = int(fps * frame_sampling_rate)
+
+#     video = cv2.VideoCapture(video_filename)
+#     base64Frame = []
+#     current_frame = 0
+
+#     while video.isOpened():
+#         success, frame = video.read()
+#         if not success:
+#             break
+#         if current_frame % frames_to_skip == 0:
+#             _, buffer = cv2.imencode('.jpg', frame)
+#             base64Frame.append(base64.b64encode(buffer).decode("utf-8"))
+#         current_frame += 1
+
+#     video.release()
+#     print(f"{len(base64Frame)} frames read at a sampling rate of {frame_sampling_rate} second(s) per frame.")
+#     return base64Frame, video_filename, video_duration
+
+# def frames_to_story(base64Frames, prompt, api_key):
+#     PROMPT_MESSAGES = [
+#         {
+#             "role": "user",
+#             "content": [
+#                 prompt,
+#                 *map(lambda x: {"image": x, "resize": 768}, base64Frames[0::50]),
+#             ],
+#         },
+#     ]
+#     params = {
+#         "model": "gpt-4-vision-preview",
+#         "messages": PROMPT_MESSAGES,
+#         "api_key": api_key,
+#         "headers": {"Openai-Version": "2020-11-07"},
+#         "max_tokens": 700,
+#     }
+#     result = openai.ChatCompletion.create(**params)
+#     print(result.choices[0].message.content)
+#     return result.choices[0].message.content
+
+# def text_to_audio(text, api_key, voice):
+#     response = requests.post(
+#         "https://api.openai.com/v1/audio/speech",
+#         headers={
+#             "Authorization": f"Bearer {api_key}",
+#         },
+#         json={
+#             "model": "tts-1",
+#             "input": text,
+#             "voice": voice,
+#         },
+#     )
+
+#     if response.status_code != 200:
+#         raise Exception("Request failed with status code")
+
+#     audio_bytes_io = io.BytesIO()
+#     for chunk in response.iter_content(chunk_size=1024*1024):
+#         audio_bytes_io.write(chunk)
+#     audio_bytes_io.seek(0)
+
+#     with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmpfile:
+#         for chunk in response.iter_content(chunk_size=1024*1024):
+#             tmpfile.write(chunk)
+#         audio_filename = tmpfile.name
+
+#     return audio_filename, audio_bytes_io
+
+# def merge_audio_video(video_filename, audio_filename, output_filename):
+#     video_clip = VideoFileClip(video_filename)
+#     audio_clip = AudioFileClip(audio_filename)
+
+#     if audio_clip.duration > video_clip.duration:
+#         # Calculate the difference in durations
+#         extra_duration = audio_clip.duration - video_clip.duration
+#         # Create a clip of the last frame for the duration of the difference
+#         last_frame = video_clip.subclip(video_clip.duration - 1).to_ImageClip(duration=extra_duration)
+#         # Concatenate the last frame clip to the end of the original video clip
+#         video_clip = concatenate_videoclips([video_clip, last_frame])
+
+#     final_clip = video_clip.set_audio(audio_clip)
+#     final_clip.write_videofile(output_filename, codec='libx264', audio_codec="aac")
+#     video_clip.close()
+#     audio_clip.close()
+
+#     return output_filename
+
+
+
+
+
+# def main():
+#     st.set_page_config(page_title="AI Voiceover", page_icon="🔮")
+#     st.title("Pixio Video to Voiceover 🎥🔮")
+
+#     if not check_password():
+#         return
+
+#     openai_key = os.getenv('OPENAI_API_KEY')
+#     if not openai_key:
+#         st.error("OpenAI API key is not set in .env.local")
+#         return
+
+#     uploaded_file = st.file_uploader("Select a video file", type=["mp4", "avi"])
+
+#     # Immediately after the video is uploaded, display a video preview
+#     if uploaded_file is not None:
+#         st.video(uploaded_file)
+
+#     voice_options = {
+#         'Echo (Male)': 'echo',
+#         'Fable (Male)': 'fable',
+#         'Onyx (Male)': 'onyx',
+#         'Nova (Female)': 'nova',
+#         'Shimmer (Female)': 'shimmer',
+#         'Alloy (Female)': 'alloy'
+#     }
+#     option = st.selectbox('Choose the voice you want', list(voice_options.keys()))
+#     classify = voice_options[option]
+
+#     duration_options = list(range(10, 121, 10)) # 10 to 120 seconds, in 10-second intervals
+#     selected_duration = st.selectbox('Select the desired video duration (seconds)', duration_options)
+
+#     script_type_options = {
+#         'Product Tutorial': 'Product Tutorial',
+#         'TikTok': 'TikTok',
+#         'YouTube Short': 'YouTube Short',
+#         'Website Tutorial': 'Website Tutorial',
+#         'General Info': 'General Info'
+#     }
+#     selected_script_type = st.selectbox('Choose the script generator type', list(script_type_options.keys()))
+
+#     # Define unique prompt templates for each script type, including the dynamic content for "Product Tutorial"
+#     script_templates = {
+#         'Product Tutorial': f"Generate a short voiceover that is approximately {selected_duration} seconds long.Your script should be limited to {selected_duration} seconds only! DO NOT exceed {selected_duration} seconds. Lets roleplay you are a script generator for tutorials. Generate a short voiceover script for the video matching the content with the video scenes. Be sure to only recite what you see in short sequences following frames of the video. You are allowed to comment on UI and UX even faces. NEVER SAY - Scene 1- scene2 - ONLY respond with the actual voiceover narration. Never add Timestamps to your response! You look at the website and create tutorial style content!! The app is called Pixio. The style should be fun and engaging. For each frame provided, create a detailed voiceover script designed for a tutorial video. Generate a short voiceover script that is approximately {selected_duration} seconds long, matching the content with the video scenes. " +
+#             "The app is called Pixio. The style should be fun and engaging. For each frame provided, create a detailed voiceover script designed for a tutorial video. " +
+#             "Never say 'Female 2' or 'VoiceOver' in responses. You output a script to be spoken! - Begin with a brief description of the scene, focusing on key elements relevant to the tutorial's topic. " +
+#             "- Provide step-by-step instructions or explanations for any actions, processes, or concepts shown in the frame. Use clear and concise language suitable for educational content. " +
+#             "- Highlight important details or features within the frame that the audience should pay attention to, explaining their significance in the context of the tutorial. " +
+#             "- Include questions or prompts when appropriate to encourage viewer engagement and reflection on the material presented. " +
+#             "- Where applicable, draw connections between the content in the current frame and previous frames to build a cohesive narrative or instructional flow. " +
+#             "- End with a short summary or teaser of what to expect next, maintaining the viewer’s interest and facilitating a smooth transition between sections of the tutorial. " +
+#             "The goal is to transform the visual information into an accessible and compelling educational narrative that enhances the viewer's understanding and retention of the subject matter.",
+#         'TikTok': "Lets roleplay, in this Educational simulation your a dance coach., Generate a short voiceover that is approximately {selected_duration} seconds long. Your script should be limited to {selected_duration} seconds only! DO NOT exceed {selected_duration} seconds. You can comment on people places things. You specialize in dance moves. Your an expert dancer. Make GREAT commentary. Generate a short voiceover that is approximately 30 seconds long. Create a captivating and concise script , focusing on quick engagement. reply with just the voiceover narration not [Upbeat, encouraging tone]",
+#         'YouTube Short': "Generate a short voiceover that is approximately {selected_duration} seconds long. Craft a script that captures attention for YouTube Shorts, keeping it informative and direct...",
+#         'Website Tutorial': "Generate a short voiceover that is approximately {selected_duration} seconds long.Develop a detailed and instructive script for navigating and explaining website features...",
+#         'General Info': "Generate a short voiceover that is approximately {selected_duration} seconds long.Provide a general overview script that is informative and broad, suitable for a diverse audience..."
+#     }
+
+#     # Generate the initial prompt based on selected script type
+#     initial_prompt = script_templates[selected_script_type]
+
+#     # Allow the user to edit the prompt
+#     prompt = st.text_area("Edit the voiceover script prompt as needed:", value=initial_prompt.format(selected_duration=selected_duration), height=300)
+
+#     if uploaded_file is not None and st.button("START PROCESSING", type="primary"):
+#         with st.spinner("Video is being processed..."):
+#             base64Frame, video_filename, video_duration = video_to_frames(uploaded_file, frame_sampling_rate=1)

+#             if video_duration > 120:
+#                 st.error("The video exceeds the maximum allowed duration of 120 seconds.")
+#                 return

+#             text = frames_to_story(base64Frame, prompt, openai_key)
+#             st.write(text)

+#             audio_filename, audio_bytes_io = text_to_audio(text, openai_key, classify)
+#             output_video_filename = os.path.splitext(video_filename)[0] + "_output.mp4"

+#             final_video_filename = merge_audio_video(video_filename, audio_filename, output_video_filename)
+#             st.video(final_video_filename)

+#             os.unlink(video_filename)
+#             os.unlink(audio_filename)
+#             os.unlink(final_video_filename)

+# if __name__ == "__main__":
+#     main()
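
For reference, a minimal standalone sketch of the overlay-audio mixing that the updated merge_audio_video() introduces, using only moviepy calls that appear in the diff (AudioFileClip, CompositeAudioClip, volumex, set_duration, set_audio, write_videofile). The file paths below are hypothetical placeholders, not part of the commit:

    # Hypothetical usage sketch of the new overlay behaviour; paths are placeholders.
    from moviepy.editor import VideoFileClip, AudioFileClip, CompositeAudioClip

    video_clip = VideoFileClip("input.mp4")                      # source video
    voiceover = AudioFileClip("voiceover.wav")                   # generated TTS track
    background = AudioFileClip("background.mp3").volumex(0.2)    # quiet overlay, 20% volume as in the diff

    # Mix the overlay under the voiceover, trimmed to the voiceover's length.
    mixed = CompositeAudioClip([voiceover, background.set_duration(voiceover.duration)])

    # Attach the mixed track and render, mirroring what merge_audio_video() does.
    video_clip.set_audio(mixed).write_videofile("output.mp4", codec="libx264", audio_codec="aac")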