Update main.py
main.py
CHANGED
@@ -1,614 +1,614 @@
 import asyncio
 import aiofiles
 import asyncpraw
 import os
 import json
 import logging
 import tempfile
 import numpy as np
 import platform
 import shutil

 from pydantic import BaseModel
 from typing import List, Optional

 from fastapi import (
     FastAPI,
     BackgroundTasks,
     Request,
     HTTPException,
     Body,
     WebSocket,
     WebSocketDisconnect,
 )
 from fastapi.responses import HTMLResponse, JSONResponse
 from fastapi.staticfiles import StaticFiles
 from fastapi.templating import Jinja2Templates

 from moviepy.editor import (
     VideoFileClip,
     CompositeVideoClip,
     TextClip,
     ImageClip,
     ColorClip,
 )
 from moviepy.config import change_settings
 from PIL import Image, ImageDraw

 import google.generativeai as genai
 from google.generativeai.types import HarmBlockThreshold, HarmCategory

 from concurrent.futures import ProcessPoolExecutor

 # Import your RedDownloader (ensure it’s installed/in your PYTHONPATH)
 from RedDownloader import RedDownloader

 # Load environment variables (make sure you have a .env file with the keys)
 from dotenv import load_dotenv

 load_dotenv()

 # Configure logging
 logging.basicConfig(
     level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
 )

 # Global list to store live log messages
 live_logs = []


 class LiveLogHandler(logging.Handler):
     def __init__(self):
         super().__init__()

     def emit(self, record):
         log_entry = self.format(record)
         live_logs.append(log_entry)
         # Keep only the last 100 messages to avoid unbounded growth
         if len(live_logs) > 100:
             live_logs.pop(0)


 # Add the custom logging handler to the root logger
 live_log_handler = LiveLogHandler()
 live_log_handler.setFormatter(
     logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
 )
 logging.getLogger().addHandler(live_log_handler)

 if platform.system() == "Windows":
     # Change ImageMagick binary path (adjust as needed)
     change_settings(
         {
             "IMAGEMAGICK_BINARY": r"C:\Program Files\ImageMagick-7.1.1-Q16-HDRI\magick.exe"
         }
     )

 # Constants and paths
 BACKGROUND_IMAGE = "background.png"
 PROCESSED_VIDEOS_FILE = "processed_videos.json"
 MAX_VIDEO_DURATION = 60  # seconds
 FONT_PATH = "font/Montserrat-Black.ttf"
 OUTPUT_DIR = "output"

 # Reddit API credentials from environment
 CLIENT_ID = os.environ.get("REDDIT_CLIENT_ID")
 CLIENT_SECRET = os.environ.get("REDDIT_CLIENT_SECRET")
 USER_AGENT = os.environ.get("REDDIT_USER_AGENT")
 USERNAME = os.environ.get("REDDIT_USERNAME")
 PASSWORD = os.environ.get("REDDIT_PASSWORD")

 # Global in‐memory settings and processing state
 subreddits = [
     "Damnthatsinteresting",
     "interestingasfuck",
     "BeAmazed",
     "nextfuckinglevel",
 ]
 processing_results = {}  # keys: post id; values: { status, video_url, generated_title, generated_caption }


 class VideoItem(BaseModel):
     id: str
     title: str
     ups: int
     url: str
     subreddit: str
     duration: Optional[int] = None
     thumbnail: Optional[str] = None


 # Limit the number of workers to a reasonable number (e.g., 2 or 4) to avoid overwhelming your system.
 process_executor = ProcessPoolExecutor(max_workers=2)

 # ------------------ Helper Functions (largely re‐using your code) ------------------


 async def initialize_reddit():
     return asyncpraw.Reddit(
         client_id=CLIENT_ID,
         client_secret=CLIENT_SECRET,
         user_agent=USER_AGENT,
         username=USERNAME,
         password=PASSWORD,
     )


 async def load_processed_videos():
     if os.path.exists(PROCESSED_VIDEOS_FILE):
         async with aiofiles.open(PROCESSED_VIDEOS_FILE, "r") as f:
             try:
                 return json.loads(await f.read())
             except json.JSONDecodeError:
                 logging.error(
                     "Error decoding JSON in processed_videos.json. Starting with an empty list."
                 )
                 return []
     return []


 async def save_processed_videos(processed_videos):
     async with aiofiles.open(PROCESSED_VIDEOS_FILE, "w") as f:
         await f.write(json.dumps(processed_videos))


 async def get_n_comments(submission, n=3):
     """Gets the top n comments from a Reddit submission."""
     try:
         comments = await submission.comments()
         await comments.replace_more(limit=None)
         top_comments = [comment.body for comment in comments]
         return top_comments[:n]
     except Exception as e:
         logging.error(f"Error getting comments: {e}")
         return []


 async def fetch_trending_videos(reddit, processed_videos, subreddit_name):
     """Fetch trending video posts from a subreddit without downloading them."""
     posts_list = []
     try:
         subreddit = await reddit.subreddit(subreddit_name)
         async for post in subreddit.hot(limit=10):
             if post.is_video and post.id not in processed_videos:
                 try:
                     duration = post.media["reddit_video"]["duration"]
                     if duration <= MAX_VIDEO_DURATION:
                         width = post.media["reddit_video"]["width"]
                         height = post.media["reddit_video"]["height"]
                         # Only include posts with a vertical-ish ratio
                         if (height / width) >= 1.6:
                             # --- TASK 4: Skip videos with no audio ---
                             if not post.media["reddit_video"].get("has_audio", True):
                                 logging.warning(
                                     f"Skipping post {post.id} due to no audio"
                                 )
                                 continue
                             posts_list.append(post)
                         else:
                             logging.warning(
                                 f"Skipping post {post.id} due to ratio: width {width} > height {height}"
                             )
                     else:
                         logging.warning(
                             f"Skipping post {post.id} due to exceeding max duration."
                         )
                 except KeyError:
                     logging.warning(
                         f"Skipping post {post.id} due to missing video dimensions/duration."
                     )
         return posts_list
     except Exception as e:
         logging.error(f"Error fetching videos from subreddit {subreddit_name}: {e}")
         return []


 async def download_video(url, filename):
     """
     Offload the synchronous RedDownloader.Download call to a thread.
     """
     loop = asyncio.get_running_loop()

     def blocking_download():
         try:
             # This call is blocking.
             RedDownloader.Download(url, quality=720, output=filename.split('.')[0])
             return os.path.exists(filename)
         except Exception as e:
             logging.error(f"Error downloading video: {e}")
             return False

     exists = await loop.run_in_executor(None, blocking_download)
     if exists:
         logging.info(f"Video downloaded to {filename}")
         return filename
     else:
         logging.error(f"Video file not found after download: {filename}")
         return None


 async def generate_title_and_caption(title, comments, first_frame, api_key):
     genai.configure(api_key=api_key)
     generation_config = genai.GenerationConfig(
         temperature=1, max_output_tokens=8192, response_mime_type="application/json"
     )

     model = genai.GenerativeModel(
-        model_name="gemini-2.
+        model_name="gemini-2.5-flash",
         generation_config=generation_config,
         safety_settings={
             HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
             HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
             HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
             HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,
         },
         system_instruction=(
-            "Generate a JSON object which contains a short youtube video title of max 15 words, "
+            "Generate a JSON object which contains a short catchy youtube video title of max 15 words to place over the video, "
             "and also a very interesting and informational paragraph describing the subject and any informative interesting knowledge derived from the comments, "
             "based on the provided video title, first frame image and comments info. Do not refer to the video or comments explicitly. "
             "Return a JSON object with keys 'title' and 'caption'."
         ),
     )
     try:
         with tempfile.NamedTemporaryFile(
             suffix=".png", delete=False
         ) as temp_image_file:
             image = Image.fromarray(first_frame, mode="RGB")
             image.save(temp_image_file.name)
             temp_image_file.flush()
             image = Image.open(temp_image_file.name)
             parts = [f"Title: {title}\n\nComments: {comments}", image]
             response = await model.generate_content_async(
                 [{"role": "user", "parts": parts}]
             )
             image.close()
             temp_image_file.close()
             if os.path.exists(temp_image_file.name):
                 os.remove(temp_image_file.name)

         try:
             response_json = json.loads(response.text)
             generated_title = response_json["title"]
             generated_caption = response_json["caption"]
             return generated_title, generated_caption
         except (json.JSONDecodeError, KeyError, TypeError) as json_err:
             logging.error(
                 f"Error parsing JSON response: {json_err}, Response Text: {response.text}"
             )
             return None, None

     except Exception as e:
         logging.error(f"Error generating title and caption: {e}")
         return None, None


 def round_corners(clip, radius):
     mask = Image.new("RGB", clip.size, 0)
     draw = ImageDraw.Draw(mask)
     draw.rounded_rectangle([(0, 0), clip.size], radius, fill=255)
     return clip.set_mask(ImageClip(np.array(mask), ismask=True))


 def create_text_clip(text, font_area, font_size, color):
     width = font_area[2] - font_area[0]
     height = font_area[3] - font_area[1]

     txt_clip = TextClip(
         text,
         fontsize=font_size,
         color=color,
         font=FONT_PATH,
         size=(width, None),
         method="caption",
         align="center",
     )

     if txt_clip.h > height:
         scale_factor = height / txt_clip.h
         new_font_size = int(font_size * scale_factor)
         txt_clip = TextClip(
             text,
             fontsize=new_font_size,
             color=color,
             font=FONT_PATH,
             size=(width, None),
             method="caption",
             align="center",
         )

     vertical_offset = 8
     txt_y = (height - txt_clip.h) / 4 + vertical_offset

     bg_clip = ColorClip(size=(width, height), color=(0, 0, 0, 0))

     final_clip = CompositeVideoClip(
         [bg_clip, txt_clip.set_position((width / 2 - txt_clip.w / 2, txt_y))]
     )

     return final_clip.set_position((font_area[0], font_area[1]))


 def calculate_text_color(frame, x, y, width, height):
     region = frame[y : y + height, x : x + width]
     if region.shape[-1] == 3:
         grayscale = np.dot(region[..., :3], [0.2989, 0.5870, 0.1140])
     else:
         grayscale = region
     mean_brightness = np.mean(grayscale)
     relative_luminance = mean_brightness / 255
     white_contrast = (1.0 + 0.05) / (relative_luminance + 0.05)
     black_contrast = (relative_luminance + 0.05) / (0.0 + 0.05)
     return "white" if white_contrast > black_contrast else "black"


 def create_watermark_clip(
     watermark_text, frame, x_pos, y_pos, width, height, video_duration
 ):
     optimal_color = calculate_text_color(frame, int(x_pos), int(y_pos), width, height)
     watermark_clip = TextClip(
         watermark_text,
         fontsize=30,
         color=optimal_color,
         font=FONT_PATH,
     )
     watermark_width = watermark_clip.w
     watermark_x = (720 - watermark_width) / 2
     return watermark_clip.set_position((watermark_x, 1140)).set_duration(video_duration)


 def generate_video(
     video_path, title, comments, api_key, post_id, watermark_text="@damnsointeresting"
 ):
     try:
         output_dir = os.path.join(OUTPUT_DIR, post_id)
         os.makedirs(output_dir, exist_ok=True)
         video = VideoFileClip(video_path)
         first_frame = video.get_frame(0)
         bg = ImageClip(BACKGROUND_IMAGE).set_duration(video.duration)
         audio = video.audio

         video_area = (40, 290, 680, 1220)
         video_width = video_area[2] - video_area[0]
         video_height = video_area[3] - video_area[1]
         video_ratio = video.w / video.h
         if video_ratio > video_width / video_height:
             new_width, new_height = video_width, int(video_width / video_ratio)
         else:
             new_width, new_height = int(video_height * video_ratio), video_height
         video = video.resize(width=new_width, height=new_height)
         x_pos = video_area[0] + (video_width - new_width) / 2
         y_pos = video_area[1] + (video_height - new_height) / 2
         video = video.set_position((x_pos, y_pos))
         video = round_corners(video, 40)

         generated_title, generated_caption = asyncio.run(
             generate_title_and_caption(title, comments, first_frame, api_key)
         )
         if not generated_title or not generated_caption:
             logging.error("Failed to generate title or caption. Skipping video.")
             return None, None

         title_clip = create_text_clip(
             generated_title, (45, 190, 675, 285), 35, "white"
         ).set_duration(video.duration)
         watermark_clip = create_watermark_clip(
             watermark_text, first_frame, 210, 1140, 300, 40, video.duration
         )

         final = CompositeVideoClip(
             [bg, video, title_clip, watermark_clip], size=(720, 1280)
         )
         final = final.set_duration(video.duration)

         if audio:
             final = final.set_audio(audio)

         output_filename = os.path.join(output_dir, f"{post_id}.mp4")
         final.write_videofile(output_filename, fps=30)

         caption_filepath = os.path.join(output_dir, f"{post_id}.txt")
         with open(caption_filepath, "w") as f:
             f.write(
                 f"Title:\n{generated_title.strip()}\n\nCaption:\n{generated_caption.strip()}"
             )

         return output_filename, generated_title, generated_caption
     except Exception as e:
         logging.error(f"Error processing video: {e}")
         return None, None, None
     finally:
         if "video" in locals():
             video.close()
         if "final" in locals():
             final.close()


 # ------------------ FastAPI App and Endpoints ------------------

 app = FastAPI()

 # Mount static directories (for output videos and any static assets)
 if not os.path.exists(OUTPUT_DIR):
     os.makedirs(OUTPUT_DIR)
 if not os.path.exists("static"):
     os.makedirs("static")
 app.mount("/output", StaticFiles(directory=OUTPUT_DIR), name="output")
 app.mount("/static", StaticFiles(directory="static"), name="static")

 templates = Jinja2Templates(directory="templates")


 # Serve the index HTML page
 @app.get("/", response_class=HTMLResponse)
 async def index(request: Request):
     return templates.TemplateResponse("index.html", {"request": request})


 @app.get("/api/videos")
 async def get_videos():
     reddit = await initialize_reddit()
     processed_videos = await load_processed_videos()
     all_posts = []
     for subreddit_name in subreddits:
         posts = await fetch_trending_videos(reddit, processed_videos, subreddit_name)
         for post in posts:
             try:
                 duration = post.media["reddit_video"]["duration"]
             except KeyError:
                 duration = None

             # Add preview_video if available from the reddit_video media.
             preview_video = None
             try:
                 reddit_video = post.media.get("reddit_video")
                 if reddit_video and "fallback_url" in reddit_video:
                     preview_video = reddit_video["fallback_url"]
             except Exception:
                 preview_video = None

             video_info = {
                 "id": post.id,
                 "title": post.title,
                 "ups": post.ups,
                 "url": post.url,
                 "subreddit": subreddit_name,
                 "duration": duration,
                 "thumbnail": post.thumbnail if hasattr(post, "thumbnail") else None,
                 "preview_video": preview_video,
             }
             all_posts.append(video_info)
     await reddit.close()
     all_posts.sort(key=lambda x: x["ups"], reverse=True)
     return JSONResponse(content=all_posts)


 @app.post("/api/process")
 async def process_selected_videos(
     background_tasks: BackgroundTasks,
     videos: List[VideoItem] = Body(...),
 ):
     if not videos:
         raise HTTPException(status_code=400, detail="No videos provided")
     for video_info in videos:
         video_id = video_info.id  # now you can access attributes directly
         if video_id in processing_results:
             continue  # already processing or processed
         processing_results[video_id] = {"status": "pending"}
         background_tasks.add_task(process_video_task, video_info.dict())
     return JSONResponse(
         content={
             "message": "Processing started",
             "video_ids": [video.id for video in videos],
         }
     )


 async def process_video_task(video_info: dict):
     video_id = video_info.get("id")
     try:
         api_key = os.environ.get("GOOGLE_API_KEY")
         if not api_key:
             processing_results[video_id] = {"status": "error", "error": "Missing GOOGLE_API_KEY"}
             return

         reddit = await initialize_reddit()
         submission = await reddit.submission(id=video_id)
         comments = await get_n_comments(submission, 5)
         comments_string = "\n\n".join(comments) if comments else "No comments found."

         temp_video_filename = f"{video_id}.mp4"
         downloaded_video = await download_video(video_info.get("url"), temp_video_filename)
         if not downloaded_video:
             processing_results[video_id] = {"status": "error", "error": "Failed to download video"}
             await reddit.close()
             return

         # Offload heavy MoviePy processing using the process pool.
         loop = asyncio.get_running_loop()
         result = await loop.run_in_executor(
             process_executor,
             generate_video,  # blocking function using MoviePy
             downloaded_video,
             video_info.get("title"),
             comments_string,
             api_key,
             f"{video_id}",
             "@damnsointeresting"
         )

         if result and result[0]:
             output_file, generated_title, generated_caption = result
             video_url = f"/output/{video_id}/{video_id}.mp4"
             processing_results[video_id] = {
                 "status": "completed",
                 "video_url": video_url,
                 "generated_title": generated_title,
                 "generated_caption": generated_caption
             }
         else:
             processing_results[video_id] = {"status": "error", "error": "Video processing failed"}

         if os.path.exists(temp_video_filename):
             os.remove(temp_video_filename)
         await reddit.close()
     except Exception as e:
         processing_results[video_id] = {"status": "error", "error": str(e)}


 @app.get("/api/results")
 async def get_results():
     return JSONResponse(content=processing_results)


 @app.get("/api/settings")
 async def get_settings():
     return JSONResponse(content={"subreddits": subreddits})


 @app.post("/api/settings")
 async def update_settings(data: dict):
     new_subs = data.get("subreddits")
     if not isinstance(new_subs, list):
         raise HTTPException(status_code=400, detail="subreddits must be a list")
     global subreddits
     subreddits = new_subs
     return JSONResponse(
         content={"message": "Settings updated", "subreddits": subreddits}
     )


 @app.delete("/api/video/{video_id}")
 async def delete_video(video_id: str):
     video_folder = os.path.join(OUTPUT_DIR, video_id)
     if os.path.exists(video_folder):
         try:
             shutil.rmtree(video_folder)
             # Remove from our in‐memory processing_results if present.
             if video_id in processing_results:
                 del processing_results[video_id]
             return JSONResponse(content={"message": f"Video {video_id} deleted."})
         except Exception as e:
             raise HTTPException(status_code=500, detail=f"Error deleting video: {e}")
     else:
         raise HTTPException(status_code=404, detail="Video not found.")


 @app.websocket("/ws/logs")
 async def websocket_logs(websocket: WebSocket):
     await websocket.accept()
     last_index = 0
     try:
         while True:
             # Sleep briefly to prevent busy looping
             await asyncio.sleep(1)
             # If new log messages are available, send them to the client.
             if last_index < len(live_logs):
                 for log in live_logs[last_index:]:
                     await websocket.send_text(log)
                 last_index = len(live_logs)
     except WebSocketDisconnect:
         logging.info("Client disconnected from live logs websocket")


 # To run the app:
 # uvicorn main:app --reload
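The app loads its credentials from a .env file via load_dotenv(). A minimal sketch of that file, using the exact variable names main.py reads; every value below is a placeholder, not a real credential:

REDDIT_CLIENT_ID=<your reddit app client id>
REDDIT_CLIENT_SECRET=<your reddit app client secret>
REDDIT_USER_AGENT=<descriptive user agent string>
REDDIT_USERNAME=<your reddit username>
REDDIT_PASSWORD=<your reddit password>
GOOGLE_API_KEY=<your gemini api key>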
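A minimal client-side sketch of the flow the endpoints imply: list candidate posts, submit a selection to /api/process, then poll /api/results until the background task settles. It assumes the app is running locally on port 8000 under uvicorn and that the third-party requests package is installed; neither assumption comes from main.py itself:

import time

import requests  # assumed client dependency; main.py does not use it

BASE = "http://localhost:8000"  # assumed local uvicorn address

# 1. List candidate posts; /api/videos returns them sorted by upvotes.
videos = requests.get(f"{BASE}/api/videos", timeout=60).json()
if not videos:
    raise SystemExit("No eligible posts right now.")

# 2. Submit the top post; the request body is a JSON list of VideoItem objects.
top = videos[0]
requests.post(f"{BASE}/api/process", json=[top], timeout=60).raise_for_status()

# 3. Poll the shared results dict until this post completes or errors out.
while True:
    status = requests.get(f"{BASE}/api/results", timeout=60).json().get(top["id"], {})
    if status.get("status") in ("completed", "error"):
        print(status)
        break
    time.sleep(5)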
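Likewise, /ws/logs pushes each new entry from the in-memory live_logs buffer roughly once per second. A sketch of a client tailing it, assuming the third-party websockets package and the same local address as above:

import asyncio

import websockets  # assumed client dependency, not part of main.py

async def tail_logs():
    # Each message is one formatted log line emitted by LiveLogHandler.
    async with websockets.connect("ws://localhost:8000/ws/logs") as ws:
        while True:
            print(await ws.recv())

asyncio.run(tail_logs())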