Chrunos committed on
Commit ca71de9 · verified · 1 Parent(s): 8af6ecf

Upload app.py

Files changed (1)
  1. app.py +288 -0
app.py ADDED
@@ -0,0 +1,288 @@
+ import sys
+ import time
+ from fastapi import FastAPI, BackgroundTasks, Request, HTTPException, Security
+ from fastapi.security import APIKeyHeader
+ from fastapi.responses import FileResponse
+ from fastapi.concurrency import run_in_threadpool
+ import yt_dlp
+ import ffmpeg
+ import urllib.parse
+ from urllib.parse import urlparse
+ import os
+ from datetime import datetime, timedelta
+ import schedule
+ import requests
+ import uvicorn
+ import subprocess
+ import json
+ from dotenv import load_dotenv
+ import mimetypes
+ import tempfile
+ from PIL import Image
+ from io import BytesIO
+ from pathlib import Path
+ from fastapi.staticfiles import StaticFiles
+ from collections import defaultdict
+ from starlette.responses import JSONResponse
+ import logging
+ import gc
+ from typing import Dict, Any
+ import re
+ import asyncio
+ import cloudscraper
+
+
+ tmp_dir = tempfile.gettempdir()
+
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+ load_dotenv()
+ app = FastAPI()
+
+ # Define a global temporary download directory
+ global_download_dir = tempfile.mkdtemp()
+
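+ # The endpoints below hand out links of the form {base_url}file/<filename>, but no /file
+ # route is defined anywhere in this file. Mounting the download directory as static files,
+ # as sketched here, is one way to make those links resolve; the mount path and name are
+ # assumptions, not something stated elsewhere in the commit.
+ app.mount("/file", StaticFiles(directory=global_download_dir), name="file")
+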
+ # Simple in-memory rate limiter: per-IP request timestamps within a sliding time window
+ class RateLimiter:
+     def __init__(self, max_requests: int, time_window: timedelta):
+         self.max_requests = max_requests
+         self.time_window = time_window
+         self.requests: Dict[str, list] = defaultdict(list)
+
+     def _cleanup_old_requests(self, user_ip: str) -> None:
+         """Remove requests that are outside the time window."""
+         current_time = time.time()
+         self.requests[user_ip] = [
+             timestamp for timestamp in self.requests[user_ip]
+             if current_time - timestamp < self.time_window.total_seconds()
+         ]
+
+     def is_rate_limited(self, user_ip: str) -> bool:
+         """Check if the user has exceeded their rate limit."""
+         self._cleanup_old_requests(user_ip)
+
+         # Get current count after cleanup
+         current_count = len(self.requests[user_ip])
+
+         # Record the current request's timestamp
+         current_time = time.time()
+         self.requests[user_ip].append(current_time)
+
+         # Check whether this request pushes the user over the maximum
+         return (current_count + 1) > self.max_requests
+
+     def get_current_count(self, user_ip: str) -> int:
+         """Get the current request count for an IP."""
+         self._cleanup_old_requests(user_ip)
+         return len(self.requests[user_ip])
+
+
+ # Initialize rate limiter with 12 requests per IP per day
+ rate_limiter = RateLimiter(
+     max_requests=12,
+     time_window=timedelta(days=1)
+ )
+
+ def get_user_ip(request: Request) -> str:
+     """Helper function to get the client's IP address, honoring X-Forwarded-For."""
+     forwarded = request.headers.get("X-Forwarded-For")
+     if forwarded:
+         return forwarded.split(",")[0]
+     return request.client.host
+
+ ALT_API = os.getenv("ALT_API")
+
+ def extract_video_info(video_url: str) -> Any:
+     """Return a list of format dicts on success, or a dict with an "error" key on failure."""
+     EXTRACT_API = ALT_API
+     api_url = f'{EXTRACT_API}?url={video_url}'
+     logger.info(api_url)
+     session = cloudscraper.create_scraper()
+     try:
+         response = session.get(api_url, timeout=20)
+
+         if response.status_code == 200:
+             json_response = response.json()
+             result = []
+             # Check that the formats list exists and is not empty
+             if 'formats' in json_response:
+                 for format_item in json_response['formats']:
+                     format_url = format_item.get('url')
+                     format_id = format_item.get('format_id')
+                     p_cookies = format_item.get('cookies')
+                     if format_id and format_url:
+                         result.append({
+                             "url": format_url,
+                             "format_id": format_id,
+                             "cookies": p_cookies
+                         })
+
+                 title = json_response.get('title')
+                 logger.info(title)
+                 if "ornhub.com" in video_url:
+                     p_result = [item for item in result if 'hls' in item['format_id']]
+                     # Relabel the top two HLS formats as premium-only (guard against short lists)
+                     if len(p_result) >= 2:
+                         last_item = p_result[-1]
+                         second_last_item = p_result[-2]
+                         last_item["format_id"] = f'{last_item["format_id"]} - Chrunos Shortcuts Premium Only'
+                         last_item["url"] = 'https://chrunos.com/premium-shortcuts/'
+                         second_last_item["format_id"] = f'{second_last_item["format_id"]} - Chrunos Shortcuts Premium Only'
+                         second_last_item["url"] = 'https://chrunos.com/premium-shortcuts/'
+                     return p_result
+                 else:
+                     new_result = result
+                     # Gate the higher-quality formats behind the premium-shortcuts link
+                     if len(new_result) > 3:
+                         for i in range(3, len(new_result)):
+                             item = new_result[i]
+                             item["format_id"] = f'{item["format_id"]} - Chrunos Shortcuts Premium Only'
+                             item["url"] = 'https://chrunos.com/premium-shortcuts/'
+                     elif 2 <= len(new_result) <= 3:
+                         last_item = new_result[-1]
+                         last_item["format_id"] = f'{last_item["format_id"]} - Chrunos Shortcuts Premium Only'
+                         last_item["url"] = 'https://chrunos.com/premium-shortcuts/'
+                     elif len(new_result) == 1:
+                         new_item = {"url": "https://chrunos.com/premium-shortcuts/",
+                                     "format_id": "Best Quality Video - Chrunos Shortcuts Premium Only"
+                                     }
+                         new_result.append(new_item)
+
+                     return new_result
+             else:
+                 if 'url' in json_response:
+                     download_url = json_response.get('url')
+                     thumbnail_url = json_response.get('thumbnail')
+                     return [
+                         {"url": download_url,
+                          "format_id": "Normal Quality Video"
+                          },
+                         {"url": thumbnail_url,
+                          "format_id": "thumbnail"},
+                         {"url": "https://chrunos.com/premium-shortcuts/",
+                          "format_id": "Best Quality Video - Chrunos Shortcuts Premium Only"}
+                     ]
+                 return {"error": "No formats available. Report Error on Telegram"}
+         else:
+             return {"error": f"Request failed with status code {response.status_code}, API: {api_url}"}
+     except Exception as e:
+         logger.error(f"An error occurred: {e}")
+         return {"error": str(e)}
+
+
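+ # Illustrative only: a successful extract_video_info() call yields a list shaped like
+ # [{"url": "https://.../video.m3u8", "format_id": "hls-720", "cookies": None}, ...],
+ # while failures yield {"error": "..."}. The /test endpoint below returns this as-is.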
+ @app.post("/test")
+ async def test_download(request: Request):
+     user_ip = get_user_ip(request)
+     if rate_limiter.is_rate_limited(user_ip):
+         current_count = rate_limiter.get_current_count(user_ip)
+         raise HTTPException(
+             status_code=429,
+             detail={
+                 "error": "You have exceeded the maximum number of requests per day. Please try again tomorrow.",
+                 "url": "https://t.me/chrunoss"
+             }
+         )
+     data = await request.json()
+     video_url = data.get('url')
+     response = extract_video_info(video_url)
+     return response
+
+
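+ # Example /ripper request body (illustrative values): {"url": "<video page URL>", "quality": "720"}.
+ # "quality" accepts "360", "480", "720" (the default), or "MP3" for audio extraction.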
+ @app.post("/ripper")
+ async def ripper_download(request: Request):
+     user_ip = get_user_ip(request)
+     if rate_limiter.is_rate_limited(user_ip):
+         raise HTTPException(
+             status_code=429,
+             detail={
+                 "error": "You have exceeded the maximum number of requests per day. Please try again tomorrow.",
+                 "url": "https://t.me/chrunoss"
+             }
+         )
+
+     data = await request.json()
+     video_url = data.get('url')
+     quality = data.get('quality', '720')  # Default to 720p
+
+     timestamp = datetime.now().strftime('%Y%m%d%H%M%S')
+
+     # Define format options based on quality
+     if quality == 'MP3':
+         ydl_opts = {
+             'format': 'bestaudio/best',
+             'outtmpl': str(Path(global_download_dir) / f'%(title)s_{timestamp}.%(ext)s'),
+             'postprocessors': [{
+                 'key': 'FFmpegExtractAudio',
+                 'preferredcodec': 'mp3',
+                 'preferredquality': '192',
+             }],
+             'quiet': True,
+             'no_warnings': True,
+             'noprogress': True,
+         }
+     else:
+         # For video qualities
+         format_mapping = {
+             '360': 'best[height<=360]',
+             '480': 'best[height<=480]',
+             '720': 'best[height<=720]',
+         }
+         video_format = format_mapping.get(str(quality), 'best')
+
+         ydl_opts = {
+             'format': video_format,
+             'outtmpl': str(Path(global_download_dir) / f'%(title)s_{timestamp}.%(ext)s'),
+             'quiet': True,
+             'no_warnings': True,
+             'noprogress': True,
+             'merge_output_format': 'mp4'
+         }
+
+     try:
+         await run_in_threadpool(lambda: yt_dlp.YoutubeDL(ydl_opts).download([video_url]))
+     except Exception as e:
+         return {"error": f"Download failed: {str(e)}"}
+
+     # Find the downloaded file
+     downloaded_files = list(Path(global_download_dir).glob(f"*_{timestamp}.*"))
+     if not downloaded_files:
+         return {"error": "Download failed"}
+
+     downloaded_file = downloaded_files[0]
+     encoded_filename = urllib.parse.quote(downloaded_file.name)
+     download_url = f"{request.base_url}file/{encoded_filename}"
+
+     gc.collect()
+     return {"url": download_url}
+
+
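+ # Example /hls request body (illustrative): {"url": "<HLS/m3u8 playlist URL>"}.
+ # Note that, unlike /test and /ripper, this endpoint applies no rate limiting.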
+ @app.post("/hls")
+ async def download_hls_video(request: Request):
+     data = await request.json()
+     hls_url = data.get('url')
+
+     timestamp = datetime.now().strftime('%Y%m%d%H%M%S')
+     output_template = str(Path(global_download_dir) / f'%(title)s_{timestamp}.%(ext)s')
+
+     ydl_opts = {
+         'format': 'best',
+         'outtmpl': output_template,
+         'quiet': True,
+         'no_warnings': True,
+         'noprogress': True,
+         'merge_output_format': 'mp4'
+     }
+
+     try:
+         await run_in_threadpool(lambda: yt_dlp.YoutubeDL(ydl_opts).download([hls_url]))
+     except Exception as e:
+         return {"error": f"Download failed: {str(e)}"}
+
+     downloaded_files = list(Path(global_download_dir).glob(f"*_{timestamp}.mp4"))
+     if not downloaded_files:
+         return {"error": "Download failed"}
+
+     downloaded_file = downloaded_files[0]
+     encoded_filename = urllib.parse.quote(downloaded_file.name)
+     # Auto-detect the base URL from the request instead of using a fixed BASE_URL
+     download_url = f"{request.base_url}file/{encoded_filename}"
+     gc.collect()
+     return {"url": download_url}
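+
+
+ # Minimal entrypoint sketch: uvicorn is imported above but never invoked in this file.
+ # The host and port here are assumptions (7860 is the conventional Hugging Face Spaces port).
+ if __name__ == "__main__":
+     uvicorn.run(app, host="0.0.0.0", port=7860)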