tecuts committed on
Commit
3394e4a
·
verified ·
1 Parent(s): 3f590db

Upload 4 files

Browse files
Files changed (4) hide show
  1. Dockerfile +48 -0
  2. main.py +445 -0
  3. requirements.txt +5 -0
  4. www.youtube.com_cookies.txt +0 -0
Dockerfile ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Use a specific Python version
FROM python:3.10

# Set the working directory in the container
WORKDIR /app

# Install required system packages (ffmpeg is needed by yt-dlp for merging/conversion)
RUN apt-get update && apt-get install -y --no-install-recommends ffmpeg && \
    apt-get clean && rm -rf /var/lib/apt/lists/*

# Create a non-root user for security
# Using uid 1000 which is common
RUN useradd -m -u 1000 appuser

# Copy requirements first to leverage Docker cache
COPY requirements.txt .

# Install Python dependencies
# NOTE(review): --pre allows pre-release versions of *every* requirement,
# not just yt-dlp -- confirm this is intentional, or pin versions instead.
RUN pip install --no-cache-dir --pre -r requirements.txt

# Copy the rest of the application files
# Make sure main.py and www.youtube.com_cookies.txt (if used) are in the build context
# NOTE(review): COPY . . bakes the cookie file (credentials) into the image --
# consider a runtime secret/volume instead.
COPY . .

# Set permissions:
# - Directories: read/execute for all, write for owner (755)
# - Files: read for all, write for owner (644)
# - Change ownership to the non-root user
RUN find /app -type d -exec chmod 755 {} \; && \
    find /app -type f -exec chmod 644 {} \; && \
    chown -R appuser:appuser /app

# Switch to the non-root user
USER appuser

# Expose the port the app will run on (common for HF Spaces)
EXPOSE 8000

# Command to run the application using uvicorn
# - main:app -> finds the 'app' instance in the 'main.py' file
# - --host 0.0.0.0 -> makes the server accessible from outside the container
# - --port 8000 -> matches the EXPOSE directive above
#   (fixed: the comment previously said 7860 while EXPOSE/CMD actually use 8000)
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
main.py ADDED
@@ -0,0 +1,445 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import uuid
3
+ import logging
4
+ from pathlib import Path
5
+ from typing import Optional, Literal, Union, Dict, Any # Import Union
6
+ from urllib.parse import urlparse
7
+ import requests
8
+ import cloudscraper
9
+ import re
10
+ # --- FastAPI Imports ---
11
+ from fastapi import FastAPI, Request, HTTPException, BackgroundTasks, Body
12
+ from fastapi.responses import JSONResponse, FileResponse
13
+ from fastapi.staticfiles import StaticFiles
14
+ from pydantic import BaseModel, HttpUrl, Field, field_validator # Import field_validator
15
+
16
+ # --- yt-dlp Import ---
17
+ from yt_dlp import YoutubeDL
18
+
19
# --- Logging Configuration ---
# Module-wide logger; INFO level so request and download progress is visible.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# --- Constants ---
DOWNLOAD_DIR = Path('downloads') # Use pathlib for paths
COOKIE_FILE = 'www.youtube.com_cookies.txt' # Define cookie file path

# --- Create Download Directory ---
# Created at import time so the StaticFiles mount below has a directory to serve.
DOWNLOAD_DIR.mkdir(parents=True, exist_ok=True)

# --- FastAPI App Initialization ---
app = FastAPI(
    title="tesings",
    description="API to fetch info",
    version="1.4.0", # Incremented version
)

# --- Mount Static Files Directory ---
# Completed downloads are served back to clients under the /downloads URL path.
app.mount("/downloads", StaticFiles(directory=DOWNLOAD_DIR), name="downloads")
39
+
40
+ # --- Pydantic Models for Request/Response Validation ---
41
+
42
class UrlRequest(BaseModel):
    """Request model for endpoints needing just a URL."""
    # Pydantic validates this is a well-formed http(s) URL before the handler runs.
    url: HttpUrl
45
+
46
# Define allowed quality string literals (including numerical ones)
# Numeric resolutions are strings because clients send them as JSON strings.
AllowedQualityStr = Literal['best', '240', '480', '720', '1080', '1440', '2160']

class MaxDownloadRequest(BaseModel):
    """Request model for the /max endpoint."""
    url: HttpUrl
    # Accept 'best' or specific numerical resolutions as strings;
    # defaults to 'best' when omitted.
    quality: Optional[AllowedQualityStr] = 'best'
54
+
55
class InfoResponse(BaseModel):
    """Response model for the /get-info endpoint."""
    # All fields optional: yt-dlp may omit any of them for some extractors.
    title: Optional[str] = None
    thumbnail: Optional[str] = None  # thumbnail image URL
    duration: Optional[float] = None  # presumably seconds (yt-dlp convention) -- TODO confirm
    channel: Optional[str] = None
61
+
62
class DownloadResponse(BaseModel):
    """Response model for download endpoints."""
    url: str  # public URL under the /downloads static mount
    filename: str  # basename of the file on disk
    message: Optional[str] = None  # optional human-readable status note
67
+
68
class ErrorResponse(BaseModel):
    """Standard error response model."""
    detail: str  # human-readable error description
71
+
72
+ # --- Helper Function for Download ---
73
def perform_download(ydl_opts: dict, url: str, file_path: Path) -> Path:
    """Synchronously download *url* with yt-dlp and return the resulting file path.

    Args:
        ydl_opts: Base yt-dlp options. A copy is taken before setting the
            output template, so the caller's dict is never mutated.
        url: Media URL to download.
        file_path: Path stem inside DOWNLOAD_DIR (no extension); the real
            extension is chosen by yt-dlp via the '%(ext)s' template.

    Returns:
        Path of the first completed output file matching the stem.

    Raises:
        RuntimeError: If the download finished but no output file was found.
        Exception: Re-raises any yt-dlp error after best-effort cleanup of
            partial files for this stem.
    """
    try:
        logger.info(f"Starting download for URL: {url} with options: {ydl_opts}")
        # Copy so the caller's options dict is not mutated as a side effect.
        opts = dict(ydl_opts)
        opts['outtmpl'] = str(file_path.with_suffix('.%(ext)s'))

        with YoutubeDL(opts) as ydl:
            ydl.extract_info(url, download=True)
        logger.info(f"Download finished successfully for URL: {url}")

        # Select completed outputs only -- exclude yt-dlp's temporary
        # .part/.ytdl files so a leftover fragment is never returned
        # as the finished download.
        downloaded_files = [
            f for f in DOWNLOAD_DIR.glob(f"{file_path.stem}.*")
            if f.suffix not in ('.part', '.ytdl')
        ]
        if not downloaded_files:
            logger.error(f"Download completed but no file found for stem: {file_path.stem}")
            for part_file in DOWNLOAD_DIR.glob(f"{file_path.stem}.*.part"):
                try:
                    part_file.unlink()
                    logger.info(f"Removed leftover part file: {part_file}")
                except OSError as rm_err:
                    logger.error(f"Error removing part file {part_file}: {rm_err}")
            raise RuntimeError(f"Could not find downloaded file for {url}")
        return downloaded_files[0]

    except Exception as e:
        logger.error(f"yt-dlp download failed for URL {url}: {e}", exc_info=True)
        # Best-effort cleanup of whatever was written for this stem.
        for f in DOWNLOAD_DIR.glob(f"{file_path.stem}.*"):
            if f.is_file():
                try:
                    f.unlink()
                    logger.info(f"Removed potentially incomplete/failed file: {f}")
                except OSError as rm_err:
                    logger.error(f"Error removing file {f}: {rm_err}")
        raise
107
+
108
+ # --- API Endpoints ---
109
+
110
@app.get("/")
async def root():
    """Landing endpoint; returns a static status payload."""
    status_payload = {"message": "Running in errors."}
    return status_payload
114
@app.post(
    "/get-info",
    response_model=InfoResponse,
    responses={500: {"model": ErrorResponse}}
)
async def get_info(payload: UrlRequest = Body(...)):
    """
    Extracts video information (title, thumbnail, duration, channel) from a given URL.

    Raises:
        HTTPException: 500 when yt-dlp cannot extract info for the URL.
    """
    logger.info(f"Received /get-info request for URL: {payload.url}")
    # 'noplaylist' for consistency with the download endpoints (inspect a
    # single video rather than expanding a whole playlist); 'quiet' keeps
    # yt-dlp's own console output out of the server logs.
    ydl_opts = {'noplaylist': True, 'quiet': True}
    if os.path.exists(COOKIE_FILE):
        ydl_opts['cookiefile'] = COOKIE_FILE
        logger.info("Using cookie file.")
    else:
        logger.warning(f"Cookie file '{COOKIE_FILE}' not found. Some videos might require login/cookies.")

    try:
        # Use str(payload.url) to pass the URL string to yt-dlp
        with YoutubeDL(ydl_opts) as ydl:
            info = ydl.extract_info(str(payload.url), download=False)
        return InfoResponse(
            title=info.get('title'),
            thumbnail=info.get('thumbnail'),
            duration=info.get('duration'),
            channel=info.get('channel')
        )
    except Exception as e:
        logger.error(f"Error fetching info for {payload.url}: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Failed to extract video info: {str(e)}")
144
+
145
# Removed: a commented-out '''...''' copy of a former /download (audio-as-MP3)
# endpoint lived here as a no-op string literal. Dead code deleted; recover it
# from version-control history if the endpoint is ever reinstated.
188
# Primary (private) downloader API endpoint, injected via the "yt_api" env var.
# NOTE(review): this is None when the variable is unset -- verify deployment env.
yt_api = os.getenv("yt_api")
189
class ApiRotator:
    """Prioritised API list that remembers the last endpoint that worked.

    The most recently successful endpoint is tried first on the next call;
    the remaining endpoints keep their original relative order.
    """

    def __init__(self, apis):
        self.apis = apis
        self.last_successful_index = None

    def get_prioritized_apis(self):
        """Return the APIs with the last successful one moved to the front."""
        idx = self.last_successful_index
        if idx is None:
            return self.apis
        return [self.apis[idx], *self.apis[:idx], *self.apis[idx + 1:]]

    def update_last_successful(self, index):
        """Record which API index just succeeded so it is preferred next time."""
        self.last_successful_index = index
207
+
208
# Ordered fallback list of cobalt-compatible downloader APIs; the private
# instance (yt_api) is preferred when configured. Entries are filtered for
# truthiness so an unset "yt_api" env var (None) does not leave a
# guaranteed-failing None endpoint in the rotation on every request.
api_rotator = ApiRotator([
    api for api in (
        yt_api,
        "https://dwnld.nichind.dev",
        "https://chrunos-load.hf.space",
        "https://cobalt-api.kwiatekmiki.com",
    )
    if api
])
215
+
216
async def get_track_download_url(video_url: str, quality: str) -> Dict[str, Any]:
    """Ask each downloader API in priority order for a download link.

    Args:
        video_url: Source video URL.
        quality: Requested quality. "max" (case-insensitive) selects maximum
            quality with the vp9 codec; any other value is passed through
            with the h264 codec.

    Returns:
        The first API JSON payload containing a "url" or "picker" key, or an
        {"error": ...} dict when every API fails. (Return annotation fixed:
        this function always returns a dict, never a plain str.)
    """
    apis = api_rotator.get_prioritized_apis()
    session = cloudscraper.create_scraper()  # Requires cloudscraper package
    headers = {
        "Accept": "application/json",
        "Content-Type": "application/json",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    }
    if quality.lower() == "max":
        body_json = {"url": video_url, "videoQuality": "max", "filenameStyle": "pretty", "youtubeVideoCodec": "vp9"}
    else:
        body_json = {"url": video_url, "videoQuality": quality, "filenameStyle": "pretty", "youtubeVideoCodec": "h264"}

    for i, api_url in enumerate(apis):
        try:
            logger.info(f"Attempting to get download URL from: {api_url}")
            response = session.post(
                api_url,
                timeout=20,
                json=body_json,
                headers=headers
            )
            logger.info(f"Response status: {response.status_code}")
            logger.info(f"Response content: {response.content}")

            if response.headers.get('content-type', '').startswith('application/json'):
                json_response = response.json()
                error_code = json_response.get("error", {}).get("code", "")

                # Age-restricted content will fail on every instance, so stop
                # early instead of burning requests on the remaining APIs.
                if error_code == "error.api.content.video.age":
                    logger.warning(f"Video unavailable error from {api_url}")
                    break  # Only break for specific error

                if "url" in json_response or "picker" in json_response:
                    api_rotator.update_last_successful(i)
                    return json_response

        except Exception as e:
            logger.error(f"Failed with {api_url}: {str(e)}")
            continue

    logger.error("No download URL found")
    return {"error": "Download URL not found"}
261
+
262
# Base URL of the external Threads extractor service (env-configured).
reads_api = os.getenv("reads_api")

def call_extract_endpoint(post_url: str) -> Dict[str, Any]:
    """Proxy a Threads post URL to the external extractor service.

    Args:
        post_url: The Threads post URL to extract media from.

    Returns:
        The extractor's parsed JSON payload.

    Raises:
        requests.RequestException: On any HTTP failure (network error,
            non-2xx status, or timeout). Also raised indirectly if
            `reads_api` is unset (the URL becomes "None/extract").
        ValueError: If the response body is not valid JSON.
    """
    try:
        # Timeout added so an unresponsive extractor cannot hang the worker
        # forever (requests has no default timeout).
        response = requests.get(f"{reads_api}/extract", params={"url": post_url}, timeout=30)
        response.raise_for_status()
        return response.json()
    except requests.RequestException as e:
        # Chain the original exception so the root cause stays in tracebacks.
        raise requests.RequestException(f"Failed to call extract endpoint: {str(e)}") from e
    except ValueError as e:
        raise ValueError(f"Invalid response format: {str(e)}") from e
273
+
274
+
275
def is_threads_url(url: str) -> bool:
    """Return True when *url* points at a Threads post (threads.net/.com)."""
    try:
        parsed = urlparse(url)
        logger.info(parsed)
        # Only exact known Threads hosts qualify (no other subdomains).
        known_hosts = {'threads.net', 'www.threads.net', 'threads.com', 'www.threads.com'}
        if parsed.netloc not in known_hosts:
            return False
        # A post link carries /post/ or /t/ in its path.
        return '/post/' in parsed.path or '/t/' in parsed.path
    except Exception:
        # Any parse failure (including non-string input) counts as "not Threads".
        return False
289
+
290
@app.post("/multi")
async def multi_download(request: Request):
    """Dispatch a download request to the appropriate backend.

    Expects a JSON body: {"url": "<http(s) URL>", "videoQuality": "<quality|mp3>"}.
    Threads URLs are proxied to the external extractor service; everything
    else is resolved through the rotating cobalt-style download APIs.

    Raises:
        HTTPException: 400 when 'url' is missing or malformed, or when
            'videoQuality' is absent (outdated shortcut clients).
    """
    data = await request.json()
    video_url = data.get('url')
    quality = data.get('videoQuality')
    logger.info(f'{video_url}, {quality}')
    if not video_url:
        raise HTTPException(
            status_code=400,
            detail={"error": "Input 'url' is required."}
        )

    # Basic URL validation: checks if it starts with http/https and has some content after.
    # For more robust validation, consider using a library like 'validators'.
    if not re.match(r'^https?://\S+', str(video_url)):
        raise HTTPException(
            status_code=400,
            detail={"error": f"Input 'url' ('{video_url}') is not a valid URL. It must start with http:// or https://."}
        )

    if not quality:  # Checks for None or empty string
        raise HTTPException(
            status_code=400,
            detail={"error": "This version of shortcut is outdated. Get the latest version of Chrunos Multi Downloader shortcut."}
        )
    # NOTE: the old 'parameter' query-string assembly ('type=audio' /
    # 'type=video&quality=...') was computed here but never used; removed.

    if is_threads_url(video_url):
        return call_extract_endpoint(video_url)

    dl_url = await get_track_download_url(video_url, quality)
    # The helper always returns a truthy dict (either the API payload or an
    # {"error": ...} marker), so the previous `if dl_url:` test made the
    # failure branch unreachable. Detect success via the keys the helper
    # guarantees on success ("url" or "picker").
    if "url" in dl_url or "picker" in dl_url:
        return dl_url
    return {
        "error": "Failed to Fetch the video."
    }
331
+
332
+
333
@app.post(
    "/max",
    response_model=DownloadResponse,
    responses={400: {"model": ErrorResponse}, 500: {"model": ErrorResponse}}
)
async def download_video_max_quality(request: Request, payload: MaxDownloadRequest = Body(...)):
    """
    Downloads the video in the specified quality or 'best' available, handling
    both landscape and portrait videos correctly. Attempts H.264 codec for 1080
    and lower. Merges video and audio into MP4.
    Accepted qualities: 'best', '240', '480', '720', '1080', '1440', '2160'.
    Quality number (as string) refers to the maximum dimension (height or width).
    """
    logger.info(f"Received /max (video) request for URL: {payload.url} with quality: {payload.quality}")

    # Random stem keeps concurrent downloads from colliding on disk.
    unique_id = str(uuid.uuid4())
    file_path_stem = DOWNLOAD_DIR / unique_id

    # --- Determine yt-dlp Format Selector based on Quality and Codec Preference ---
    quality_str = payload.quality  # Quality is now guaranteed to be a string from AllowedQualityStr
    format_selector = None
    max_dim = 0  # Initialize max_dim

    if quality_str == 'best':
        format_selector = 'bestvideo+bestaudio/best'  # Best video and audio, merged if possible
        logger.info("Using format selector for 'best' quality.")
    else:
        # Quality is a numerical string ('240', '480', etc.)
        try:
            # Convert the validated string quality to an integer for logic
            max_dim = int(quality_str)
        except ValueError:
            # This should not happen if Pydantic validation works, but good practice
            logger.error(f"Internal error: Could not convert validated quality string '{quality_str}' to int. Falling back to 'best'.")
            format_selector = 'bestvideo+bestaudio/best'
            # Set max_dim to a high value to skip specific logic below if format_selector is set
            max_dim = 99999

    # Only proceed if format_selector wasn't set in the except block.
    # long_edge widens the cap by 1.8x so portrait videos (tall, narrow) are
    # not rejected by a strict height limit -- presumably the intent; confirm.
    long_edge = int(max_dim * 1.8)
    if not format_selector:
        # --- Codec Preference Logic ---
        if max_dim <= 1080:
            # Prefer H.264 (avc1) for 1080 or lower max dimension
            logger.info(f"Attempting H.264 codec for requested quality (max dimension): {max_dim}")
            format_selector = f'bestvideo[vcodec^=avc][height<={long_edge}][width<={long_edge}]+bestaudio/best'
            #f'bestvideo[height<={long_edge}]/bestvideo[width<={long_edge}]+bestaudio/'
            #f'best[height<={long_edge}]/best[width<={long_edge}]'
        else:
            # For > 1080 max dimension, prioritize best available codec
            logger.info(f"Attempting best available codec for requested quality (max dimension): {max_dim}")
            format_selector = f'bestvideo[height<={long_edge}][width<={long_edge}]+bestaudio/best'

    logger.info(f"Using format selector: '{format_selector}'")

    # --- yt-dlp Options for Video Download ---
    ydl_opts = {
        'format': format_selector,
        'outtmpl': str(file_path_stem.with_suffix('.%(ext)s')),
        'merge_output_format': 'mp4',  # Merge into MP4 container
        'noplaylist': True,
        'quiet': True,
        'noprogress': True
    }
    if os.path.exists(COOKIE_FILE):
        ydl_opts['cookiefile'] = COOKIE_FILE
        logger.info("Using cookie file for video download.")
    else:
        logger.warning(f"Cookie file '{COOKIE_FILE}' not found for video download.")

    try:
        # Use str(payload.url) to pass the URL string to the helper
        final_file_path = perform_download(ydl_opts, str(payload.url), file_path_stem)
        final_filename = final_file_path.name
        # Build a public URL pointing at the /downloads static mount.
        download_url = f"{str(request.base_url).rstrip('/')}/downloads/{final_filename}"

        logger.info(f"Video download complete for {payload.url}. URL: {download_url}")
        # Changed 'download_url=' to 'url='
        return DownloadResponse(url=download_url, filename=final_filename)

    except Exception as e:
        # Error logged in perform_download
        raise HTTPException(status_code=500, detail=f"Video download failed: {str(e)}")
416
+
417
+ # --- Optional: Cleanup Task ---
418
async def cleanup_old_files(directory: Path, max_age_seconds: int):
    """Removes files older than max_age_seconds in the background."""
    import time
    now = time.time()
    removed = 0
    try:
        for entry in directory.iterdir():
            if not entry.is_file():
                continue
            try:
                if now - entry.stat().st_mtime > max_age_seconds:
                    os.remove(entry)
                    logger.info(f"Cleaned up old file: {entry.name}")
                    removed += 1
            except OSError as e:
                logger.error(f"Error removing file {entry}: {e}")
        if removed > 0:
            logger.info(f"Background cleanup finished. Removed {removed} old files.")
        else:
            logger.info("Background cleanup finished. No old files found.")
    except Exception as e:
        logger.error(f"Error during background file cleanup: {e}", exc_info=True)
439
+
440
@app.post("/trigger-cleanup")
async def trigger_cleanup(background_tasks: BackgroundTasks):
    """Manually trigger a cleanup of files older than 1 day.

    The deletion runs as a FastAPI background task after the response is
    sent, so this endpoint returns immediately.
    """
    logger.info("Triggering background cleanup of old download files.")
    background_tasks.add_task(cleanup_old_files, DOWNLOAD_DIR, 86400) # 1 day
    return {"message": "Background cleanup task scheduled."}
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ uvicorn
2
+ yt-dlp
3
+ fastapi
4
+ requests
5
+ cloudscraper
www.youtube.com_cookies.txt ADDED
The diff for this file is too large to render. See raw diff