Chrunos committed
Commit a7c3b28 · verified · 1 parent: 97e71fe

Update app.py

Files changed (1)
app.py +384 -296
app.py CHANGED
@@ -1,331 +1,419 @@
  import os
- import re
  import time
  import asyncio
- from concurrent.futures import ThreadPoolExecutor
- from typing import List, Optional, Dict, Any
- from urllib.parse import urlparse
- from fastapi import FastAPI, HTTPException, Query, Request, BackgroundTasks
- from fastapi.middleware.cors import CORSMiddleware
- from fastapi.responses import JSONResponse
- from pydantic import BaseModel
- from selenium import webdriver
- from selenium.webdriver.common.by import By
- from selenium.webdriver.support.ui import WebDriverWait
- from selenium.webdriver.support import expected_conditions as EC
- from selenium.webdriver.chrome.options import Options
- from selenium.common.exceptions import TimeoutException, NoSuchElementException, WebDriverException
- import uvicorn
-
- app = FastAPI(
-     title="Threads Media Extractor API",
-     description="Extract media URLs from Threads posts - Optimized version",
-     version="2.1.0"
- )

- # Add CORS middleware
- app.add_middleware(
-     CORSMiddleware,
-     allow_origins=["*"],
-     allow_credentials=True,
-     allow_methods=["*"],
-     allow_headers=["*"],
- )

- # Global driver pool for reuse
- driver_pool = []
- executor = ThreadPoolExecutor(max_workers=2)

- class MediaItem(BaseModel):
      url: str

- class ThreadsResponse(BaseModel):
-     post_url: str
-     url: Optional[str] = None
-     picker: Optional[List[MediaItem]] = None
-     media_count: int
-     post_text: Optional[str] = None
-     author: Optional[str] = None
-     success: bool
-     processing_time: Optional[float] = None

  class ErrorResponse(BaseModel):
-     error: str
-     success: bool = False
-
- def create_optimized_driver():
-     """Create and configure optimized Chrome WebDriver"""
-     options = Options()
-     options.add_argument('--headless=new')  # Use new headless mode
-     options.add_argument('--no-sandbox')
-     options.add_argument('--disable-dev-shm-usage')
-     options.add_argument('--disable-gpu')
-     options.add_argument('--disable-extensions')
-     options.add_argument('--disable-plugins')
-     options.add_argument('--disable-default-apps')
-     options.add_argument('--disable-background-timer-throttling')
-     options.add_argument('--disable-backgrounding-occluded-windows')
-     options.add_argument('--disable-renderer-backgrounding')
-     options.add_argument('--disable-features=TranslateUI')
-     options.add_argument('--disable-ipc-flooding-protection')
-
-     # Performance optimizations
-     options.add_argument('--memory-pressure-off')
-     options.add_argument('--max_old_space_size=4096')
-     options.add_argument('--window-size=1280,720')  # Smaller window
-
-     # Network optimizations
-     options.add_argument('--aggressive-cache-discard')
-     options.add_argument('--disable-background-networking')
-
-     # Disable unnecessary features
-     options.add_experimental_option('useAutomationExtension', False)
-     options.add_experimental_option("excludeSwitches", ["enable-automation"])
-     options.add_argument('--disable-blink-features=AutomationControlled')
-
-     # User agent
-     options.add_argument('--user-agent=Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36')
-
-     try:
-         driver = webdriver.Chrome(options=options)
-         driver.implicitly_wait(5)  # Reduced wait time
-         driver.set_page_load_timeout(15)  # Reduced timeout
-
-         # Optimize browser settings
-         driver.execute_cdp_cmd('Network.setUserAgentOverride', {
-             "userAgent": 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
-         })
-
-         return driver
-     except Exception as e:
-         raise HTTPException(status_code=500, detail=f"Failed to create browser driver: {str(e)}")
-
- def get_driver():
-     """Get driver from pool or create new one"""
-     if driver_pool:
-         return driver_pool.pop()
-     return create_optimized_driver()
-
- def return_driver(driver):
-     """Return driver to pool for reuse"""
-     if len(driver_pool) < 2:  # Keep max 2 drivers in pool
-         driver_pool.append(driver)
-     else:
          try:
-             driver.quit()
-         except:
              pass

- def extract_post_id_from_url(url: str) -> Optional[str]:
-     """Extract post ID from Threads URL"""
-     patterns = [
-         r'threads\.net/@[^/]+/post/([A-Za-z0-9_-]+)',
-         r'threads\.net/t/([A-Za-z0-9_-]+)',
-         r'threads\.com/@[^/]+/post/([A-Za-z0-9_-]+)',
-         r'threads\.com/t/([A-Za-z0-9_-]+)',
-     ]
-
-     for pattern in patterns:
-         match = re.search(pattern, url)
-         if match:
-             return match.group(1)
-
      return None

- def is_valid_threads_url(url: str) -> bool:
-     """Validate if URL is a valid Threads URL"""
      try:
-         parsed = urlparse(url)
-         return (
-             parsed.netloc in ['threads.net', 'www.threads.net', 'threads.com', 'www.threads.com'] and
-             (('/post/' in parsed.path) or ('/t/' in parsed.path))
-         )
-     except:
-         return False
-
- def fast_extract_media(driver: webdriver.Chrome, url: str) -> Dict[str, Any]:
-     """Optimized media extraction with faster loading"""
-     media_urls = []
-     post_text = None
-     author = None

-     try:
-         start_time = time.time()
-
-         # Navigate to the URL
-         driver.get(url)

-         # Wait for essential elements only
-         try:
-             WebDriverWait(driver, 8).until(
-                 lambda d: d.execute_script("return document.readyState") == "complete"
-             )
-         except TimeoutException:
-             pass  # Continue even if timeout

-         # Quick wait for dynamic content
-         time.sleep(1.5)  # Reduced from 3 seconds

-         # Extract videos first (most important)
-         video_elements = driver.find_elements(By.TAG_NAME, 'video')
-         for video in video_elements:
-             src = video.get_attribute('src')
-             if src and src.startswith('http'):
-                 media_urls.append(src)
-
-             # Check source elements
-             sources = video.find_elements(By.TAG_NAME, 'source')
-             for source in sources:
-                 src = source.get_attribute('src')
-                 if src and src.startswith('http'):
-                     media_urls.append(src)

-         # If no videos found, look for images quickly
-         if not media_urls:
-             img_elements = driver.find_elements(By.TAG_NAME, 'img')[:10]  # Limit to first 10 images
-             for img in img_elements:
-                 src = img.get_attribute('src')
-                 if src and src.startswith('http') and any(ext in src.lower() for ext in ['.jpg', '.jpeg', '.png', '.gif', '.webp']):
-                     if not any(exclude in src.lower() for exclude in ['profile', 'avatar', 'icon', 'logo']):
-                         media_urls.append(src)

-         # Quick text extraction (optional, skip if taking too long)
          try:
-             text_elements = driver.find_elements(By.CSS_SELECTOR, 'div[role="article"] span, article span')[:5]
-             for element in text_elements:
-                 text = element.text.strip()
-                 if text and len(text) > 10 and not post_text:
-                     post_text = text
                      break
-         except:
-             pass
-
-         # Remove duplicates
-         seen = set()
-         unique_media_urls = []
-         for url in media_urls:
-             if url not in seen:
-                 seen.add(url)
-                 unique_media_urls.append(url)
-
-         processing_time = time.time() - start_time
-
-         return {
-             "media_urls": unique_media_urls,
-             "post_text": post_text,
-             "author": author,
-             "processing_time": processing_time
-         }
-
-     except Exception as e:
-         raise HTTPException(status_code=500, detail=f"Error extracting media: {str(e)}")

- def extract_media_sync(url: str) -> Dict[str, Any]:
-     """Synchronous wrapper for thread execution"""
-     driver = None
-     try:
-         driver = get_driver()
-         result = fast_extract_media(driver, url)
-         return result
-     finally:
-         if driver:
-             return_driver(driver)
-
- @app.get("/", response_model=Dict[str, str])
- async def root():
-     """Root endpoint with API information"""
-     return {
-         "message": "Threads Media Extractor API v2.1 - Optimized",
-         "description": "Fast extraction of media URLs from Threads posts",
-         "version": "2.1.0",
-         "optimization": "Driver pooling, reduced timeouts, focused extraction"
-     }

- @app.get("/health")
- async def health_check():
-     """Health check endpoint"""
-     return {
-         "status": "healthy",
-         "service": "threads-media-extractor",
-         "version": "2.1.0",
-         "driver_pool_size": len(driver_pool)
-     }

- @app.get("/extract", response_model=ThreadsResponse)
- async def extract_media(url: str = Query(..., description="Threads post URL")):
-     """
-     Extract media URLs from a Threads post - Optimized version
-
-     Args:
-         url: The Threads post URL to extract media from
-
-     Returns:
-         ThreadsResponse with media URLs and metadata
-     """

-     # Validate URL
-     if not url:
-         raise HTTPException(status_code=400, detail="URL parameter is required")

-     if not is_valid_threads_url(url):
-         raise HTTPException(status_code=400, detail="Invalid Threads URL format")

-     # Extract post ID
-     post_id = extract_post_id_from_url(url)
-     if not post_id:
-         raise HTTPException(status_code=400, detail="Could not extract post ID from URL")

      try:
-         # Run extraction in thread pool for better async handling
-         loop = asyncio.get_event_loop()
-         extracted_data = await loop.run_in_executor(executor, extract_media_sync, url)
-
-         media_urls = extracted_data["media_urls"]
-         media_count = len(media_urls)
-
-         response_data = {
-             "post_url": url,
-             "media_count": media_count,
-             "post_text": extracted_data["post_text"],
-             "author": extracted_data["author"],
-             "success": True,
-             "processing_time": extracted_data.get("processing_time")
-         }

-         if media_count == 1:
-             response_data["url"] = media_urls[0]
          else:
-             response_data["picker"] = [{"url": url} for url in media_urls]
-
-         return ThreadsResponse(**response_data)
-
-     except HTTPException:
-         raise
-     except Exception as e:
-         raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")
-
- @app.on_event("shutdown")
- async def shutdown_event():
-     """Clean up resources on shutdown"""
-     executor.shutdown(wait=False)
-     for driver in driver_pool:
-         try:
-             driver.quit()
-         except:
-             pass

- @app.exception_handler(HTTPException)
- async def http_exception_handler(request: Request, exc: HTTPException):
-     """Custom HTTP exception handler"""
-     return JSONResponse(
-         status_code=exc.status_code,
-         content={
-             "error": exc.detail,
-             "success": False,
-             "status_code": exc.status_code
-         }
-     )
-
- if __name__ == "__main__":
-     port = int(os.environ.get("PORT", 7860))
-     uvicorn.run(app, host="0.0.0.0", port=port)
+ from fastapi import FastAPI, HTTPException, Request
+ from fastapi.responses import HTMLResponse
+ from fastapi.staticfiles import StaticFiles
+ from fastapi.templating import Jinja2Templates
+ from pydantic import BaseModel
+ from ytmusicapi import YTMusic
  import os
+ import logging
+ import requests
+ from datetime import datetime, timedelta
+ from collections import defaultdict
  import time
  import asyncio
+ import cloudscraper
+ from urllib.parse import urlparse, parse_qs
+ import threading
+ from typing import Optional, Dict, Any
+
+ app = FastAPI()
+
+ # Mount static files and templates
+ app.mount("/static", StaticFiles(directory="static"), name="static")
+ templates = Jinja2Templates(directory="templates")
+
+ ytmusic = YTMusic()
+
+ # Configure logging
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+ # Pydantic models for request/response validation
+ class SearchRequest(BaseModel):
+     query: str
+
+ class MatchRequest(BaseModel):
      url: str

+ class TrackDownloadRequest(BaseModel):
+     track_id: str
+     quality: str = "128"
+
+ class MatchResponse(BaseModel):
+     url: str
+     filename: str
+     track_id: str

  class ErrorResponse(BaseModel):
+     detail: str
+     premium: Optional[str] = None
+
+ @app.get("/", response_class=HTMLResponse)
+ async def index(request: Request):
+     return templates.TemplateResponse("index.html", {"request": request})
+
+ @app.post("/search")
+ async def search(request: SearchRequest):
+     search_results = ytmusic.search(request.query, filter="songs")
+     return search_results
+
+ @app.post("/searcht")
+ async def searcht(request: SearchRequest):
+     logger.info(f"search query: {request.query}")
+     search_results = ytmusic.search(request.query, filter="songs")
+     first_song = next((song for song in search_results if 'videoId' in song and song['videoId']), {}) if search_results else {}
+     return first_song
+
+ def extract_amazon_track_id(url: str) -> Optional[str]:
+     """
+     Extracts track ID from various Amazon Music URL formats.
+     """
+     if "music.amazon.com" not in url:
+         return None
+
+     parsed_url = urlparse(url)
+     query_params = parse_qs(parsed_url.query)
+
+     if "trackAsin" in query_params:
+         return query_params["trackAsin"][0]
+
+     path_parts = parsed_url.path.split('/')
+     if "tracks" in path_parts:
          try:
+             track_id_index = path_parts.index("tracks") + 1
+             if track_id_index < len(path_parts):
+                 return path_parts[track_id_index]
+         except (ValueError, IndexError):
              pass

+     logger.warning(f"Could not extract Amazon track ID from URL: {url}")
      return None
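For a quick sense of the accepted inputs, a hedged sanity check (the ASIN-style IDs below are invented placeholders, not real tracks):

# Invented placeholder ASINs, purely illustrative.
assert extract_amazon_track_id(
    "https://music.amazon.com/albums/B0ALBUM0X?trackAsin=B0TRACK0X"
) == "B0TRACK0X"   # the trackAsin query parameter takes precedence
assert extract_amazon_track_id(
    "https://music.amazon.com/tracks/B0TRACK0Y"
) == "B0TRACK0Y"   # otherwise the path segment after "tracks" is used
assert extract_amazon_track_id("https://example.com/song/123") is None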
 
+ def get_song_link_info(url: str) -> Optional[Dict[str, Any]]:
+     """
+     Fetches track information from the Song.link API.
+     Uses requests.get(), which is a blocking call.
+     """
+     api_base_url = "https://api.song.link/v1-alpha.1/links"
+     params = {"userCountry": "US"}
+
+     if "music.amazon.com" in url:
+         track_id = extract_amazon_track_id(url)
+         if track_id:
+             params["platform"] = "amazonMusic"
+             params["id"] = track_id
+             params["type"] = "song"
+         else:
+             params["url"] = url
+     else:
+         params["url"] = url
+
      try:
+         logger.info(f"Querying Song.link API with params: {params}")
+         response = requests.get(api_base_url, params=params, timeout=10)
+         response.raise_for_status()
+         return response.json()
+     except requests.exceptions.RequestException as e:
+         logger.error(f"Error fetching from Song.link API: {e}")
+         return None
+
+ def extract_url(links_by_platform: dict, platform: str) -> Optional[str]:
+     """
+     Extracts a specific platform URL from a Song.link API response.
+     """
+     if platform in links_by_platform and links_by_platform[platform].get("url"):
+         return links_by_platform[platform]["url"]

+     logger.warning(f"No URL found for platform '{platform}' in links: {links_by_platform.keys()}")
+     return None
+
+ @app.post("/match", response_model=MatchResponse)
+ async def match(request: MatchRequest):
+     """
+     Matches a given music track URL to a YouTube Music URL.
+     """
+     track_url = request.url
+     logger.info(f"Match endpoint: Processing URL: {track_url}")
+
+     track_info = get_song_link_info(track_url)
+     if not track_info:
+         logger.error(f"Match endpoint: Could not fetch track info for URL: {track_url}")
+         raise HTTPException(status_code=404, detail="Could not fetch track info from Song.link API.")
+
+     entity_unique_id = track_info.get("entityUniqueId")
+     title = None
+     artist = None
+
+     if entity_unique_id and entity_unique_id in track_info.get("entitiesByUniqueId", {}):
+         main_entity = track_info["entitiesByUniqueId"][entity_unique_id]
+         title = main_entity.get("title")
+         artist = main_entity.get("artistName")
+         logger.info(f"Match endpoint: Found main entity - Title: '{title}', Artist: '{artist}'")
+     else:
+         logger.warning(f"Match endpoint: Could not find main entity details for {track_url} using entityUniqueId: {entity_unique_id}")

+         # Fallback logic to find title/artist from other entities
+         for entity_id, entity_data in track_info.get("entitiesByUniqueId", {}).items():
+             if entity_data.get("title") and entity_data.get("artistName"):
+                 title = entity_data.get("title")
+                 artist = entity_data.get("artistName")
+                 logger.info(f"Match endpoint: Using fallback entity - Title: '{title}', Artist: '{artist}' from entity ID {entity_id}")
+                 break
+
+     if not title or not artist:
+         logger.error(f"Match endpoint: Could not determine title and artist for URL: {track_url}")
+         raise HTTPException(status_code=404, detail="Could not determine title and artist from Song.link info.")
+
+     youtube_url = extract_url(track_info.get("linksByPlatform", {}), "youtube")
+
+     if youtube_url:
+         video_id = None

+         if "v=" in youtube_url:
+             video_id = youtube_url.split("v=")[1].split("&")[0]
+         elif "youtu.be/" in youtube_url:
+             video_id = youtube_url.split("youtu.be/")[1].split("?")[0]
+
+         filename = f"{title} - {artist}" if title and artist else "Unknown Track - Unknown Artist"
+         logger.info(f"Match endpoint: Found direct YouTube URL: {youtube_url}, Video ID: {video_id}")

+         # MatchResponse declares track_id as str, so fall back to "" when no ID could be parsed
+         return MatchResponse(url=youtube_url, filename=filename, track_id=video_id or "")
+     else:
+         logger.info(f"Match endpoint: No direct YouTube URL. Searching YTMusic with: '{title} - {artist}'")

+         search_query = f'{title} {artist}'
+         search_results = ytmusic.search(search_query, filter="songs")

+         if search_results:
+             first_song = next((song for song in search_results if song.get('videoId')), None)
+
+             if first_song and first_song.get('videoId'):
+                 video_id = first_song["videoId"]
+                 ym_url = f'https://music.youtube.com/watch?v={video_id}'
+
+                 # Get artist name safely
+                 artist_name = artist
+                 if first_song.get('artists') and len(first_song['artists']) > 0:
+                     artist_name = first_song['artists'][0]['name']
+
+                 filename = f"{first_song.get('title', title)} - {artist_name}"
+                 logger.info(f"Match endpoint: Found YTMusic search result - URL: {ym_url}, Video ID: {video_id}")
+
+                 return MatchResponse(filename=filename, url=ym_url, track_id=video_id)
+             else:
+                 logger.error(f"Match endpoint: YTMusic search for '{search_query}' yielded no results with a videoId.")
+                 raise HTTPException(status_code=404, detail="No matching video ID found on YouTube Music after search.")
+         else:
+             logger.error(f"Match endpoint: YTMusic search for '{search_query}' yielded no results.")
+             raise HTTPException(status_code=404, detail="No results found on YouTube Music for the track.")
+
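Assuming the server is running locally on port 7860 (see the __main__ block at the end of the file), a /match call might look like this; the source URL and response values are illustrative only:

import requests

# Any platform URL that Song.link understands should work here (illustrative).
resp = requests.post(
    "http://localhost:7860/match",
    json={"url": "https://open.spotify.com/track/<some-track-id>"},
    timeout=60,
)
print(resp.json())
# Expected shape on success (values hypothetical):
# {"url": "https://music.youtube.com/watch?v=...", "filename": "Title - Artist", "track_id": "..."}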
+ class ApiRotator:
+     def __init__(self, apis):
+         self.apis = apis
+         self.last_successful_index = None
+
+     def get_prioritized_apis(self):
+         if self.last_successful_index is not None:
+             # Move the last successful API to the front
+             rotated_apis = (
+                 [self.apis[self.last_successful_index]] +
+                 self.apis[:self.last_successful_index] +
+                 self.apis[self.last_successful_index+1:]
+             )
+             return rotated_apis
+         return self.apis
+
+     def update_last_successful(self, index):
+         self.last_successful_index = index
+
+ api_rotator = ApiRotator([
+     "https://dwnld.nichind.dev",
+     "https://yt.edd1e.xyz/",
+     "http://34.107.254.11"
+ ])
+
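The rotator implements success-biased ordering rather than round-robin: whichever backend last returned a usable URL is tried first on the next call. A small sketch of the behavior:

rotator = ApiRotator(["a", "b", "c"])
print(rotator.get_prioritized_apis())  # ['a', 'b', 'c'] - nothing recorded yet
rotator.update_last_successful(1)      # backend at index 1 ("b") just worked
print(rotator.get_prioritized_apis())  # ['b', 'a', 'c'] - last winner tried first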
+ async def get_track_download_url(track_id: str, quality: str) -> str:
+     apis = api_rotator.get_prioritized_apis()
+     session = cloudscraper.create_scraper()
+
+     headers = {
+         "Accept": "application/json",
+         "Content-Type": "application/json",
+         "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
+     }
+
+     for i, api_url in enumerate(apis):
          try:
+             logger.info(f"Attempting to get download URL from: {api_url}")
+             y_url = f"https://youtu.be/{track_id}"
+
+             # Use asyncio to run the blocking request in a thread pool
+             loop = asyncio.get_event_loop()
+             response = await loop.run_in_executor(
+                 None,
+                 lambda: session.post(
+                     api_url,
+                     timeout=20,
+                     json={"url": y_url, "audioFormat": "mp3", "downloadMode": "audio", "audioBitrate": quality},
+                     headers=headers
+                 )
+             )
+
+             logger.info(f"Response status: {response.status_code}")
+             logger.info(f"Response content: {response.content}")
+
+             if response.headers.get('content-type', '').startswith('application/json'):
+                 json_response = response.json()
+                 error_code = json_response.get("error", {}).get("code", "")
+
+                 if error_code == "error.api.content.video.unavailable":
+                     logger.warning(f"Video unavailable error from {api_url}")
                      break
+
+                 if "url" in json_response:
+                     api_rotator.update_last_successful(i)
+                     return json_response["url"]

+         except Exception as e:
+             logger.error(f"Failed with {api_url}: {str(e)}")
+             continue

+     logger.error("No download URL found")
+     return ""

+
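All three rotated backends receive the same request body, so they presumably expose the same download API; the shapes below are inferred entirely from this handler, not from any backend documentation:

# Request body POSTed to each backend:
#   {"url": "https://youtu.be/<track_id>", "audioFormat": "mp3",
#    "downloadMode": "audio", "audioBitrate": "<quality>"}
# Success response (the handler only requires a "url" key):
#   {"url": "https://<backend>/..."}
# Failure response explicitly checked for above:
#   {"error": {"code": "error.api.content.video.unavailable"}}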
+ # Rate limiting dictionary
+ class RateLimiter:
+     def __init__(self, max_requests: int, time_window: timedelta):
+         self.max_requests = max_requests
+         self.time_window = time_window
+         self.requests: Dict[str, list] = defaultdict(list)

+     def _cleanup_old_requests(self, user_ip: str) -> None:
+         """Remove requests that are outside the time window."""
+         current_time = time.time()
+         self.requests[user_ip] = [
+             timestamp for timestamp in self.requests[user_ip]
+             if current_time - timestamp < self.time_window.total_seconds()
+         ]

+     def is_rate_limited(self, user_ip: str) -> bool:
+         """Check if the user has exceeded their rate limit."""
+         self._cleanup_old_requests(user_ip)
+
+         # Get current count after cleanup
+         current_count = len(self.requests[user_ip])
+
+         # Add current request timestamp (incrementing the count)
+         current_time = time.time()
+         self.requests[user_ip].append(current_time)
+
+         # Check if user has exceeded the maximum requests
+         return (current_count + 1) > self.max_requests

+     def get_current_count(self, user_ip: str) -> int:
+         """Get the current request count for an IP."""
+         self._cleanup_old_requests(user_ip)
+         return len(self.requests[user_ip])
+
+
+ # Initialize rate limiter with 6 requests per day
+ rate_limiter = RateLimiter(
+     max_requests=6,
+     time_window=timedelta(days=1)
+ )
+
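Note that the window is sliding, not calendar-based: each IP keeps a list of request timestamps, stale entries are pruned on every check, and the check itself records the request. A minimal sketch, using a documentation-range IP as a placeholder:

limiter = RateLimiter(max_requests=2, time_window=timedelta(seconds=60))
print(limiter.is_rate_limited("203.0.113.7"))  # False - first request in window
print(limiter.is_rate_limited("203.0.113.7"))  # False - second request, at the limit
print(limiter.is_rate_limited("203.0.113.7"))  # True  - third request exceeds max_requests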
+ def get_user_ip(request: Request) -> str:
+     """Helper function to get user's IP address."""
+     forwarded = request.headers.get("X-Forwarded-For")
+     if forwarded:
+         return forwarded.split(",")[0]
+     return request.client.host
+
+
+ @app.post("/track_dl")
+ async def track_dl(request: TrackDownloadRequest, req: Request):
+     user_ip = get_user_ip(req)

+     if rate_limiter.is_rate_limited(user_ip):
+         current_count = rate_limiter.get_current_count(user_ip)
+         raise HTTPException(
+             status_code=429,
+             detail={
+                 "error": "You have exceeded the maximum number of requests per day. Please try again tomorrow.",
+                 "url": "https://t.me/chrunoss"
+             }
+         )
      try:
+         # Check for FLAC before int(): int('FLAC') would raise ValueError first,
+         # making the premium hint unreachable.
+         if request.quality.upper() == 'FLAC':
+             raise HTTPException(
+                 status_code=400,
+                 detail={
+                     "error": "Quality above 128 or FLAC is for Premium users Only.",
+                     "premium": "https://chrunos.com/premium-shortcuts/"
+                 }
+             )
+         quality_num = int(request.quality)
+         if quality_num > 128:
+             raise HTTPException(
+                 status_code=400,
+                 detail={
+                     "error": "Quality above 128 or FLAC is for Premium users Only.",
+                     "premium": "https://chrunos.com/premium-shortcuts/"
+                 }
+             )
+
+         dl_url = await get_track_download_url(request.track_id, request.quality)

+         if dl_url and "http" in dl_url:
+             return {
+                 "url": dl_url,
+                 "premium": "https://chrunos.com/premium-shortcuts/"
+             }
          else:
+             raise HTTPException(
+                 status_code=400,
+                 detail={
+                     "error": "Failed to Fetch the Track.",
+                     "premium": "https://chrunos.com/premium-shortcuts/"
+                 }
+             )
+
+     except ValueError:
+         raise HTTPException(
+             status_code=400,
+             detail={
+                 "error": "Invalid quality value provided. It should be a valid integer or FLAC.",
+                 "premium": "https://chrunos.com/premium-shortcuts/"
+             }
+         )
+
+ @app.get("/get_artist")
+ async def get_artist(id: str):
+     artist_info = ytmusic.get_artist(id)
+     return artist_info
+
+ @app.get("/get_album")
+ async def get_album(id: str):
+     album_info = ytmusic.get_album(id)
+     return album_info
+
+ @app.get("/get_song")
+ async def get_song(id: str):
+     song_info = ytmusic.get_song(id)
+     return song_info

+ if __name__ == '__main__':
+     import uvicorn
+     uvicorn.run(app, host='0.0.0.0', port=7860)
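End to end, a client would typically resolve a track_id via /match (or /searcht) and then request a download URL. A hedged local example, with placeholder values:

import requests

BASE = "http://localhost:7860"

# Step 1: resolve a platform URL to a YouTube Music match (URL is a placeholder).
match = requests.post(f"{BASE}/match",
                      json={"url": "https://open.spotify.com/track/<some-track-id>"},
                      timeout=60).json()

# Step 2: request a download URL; qualities above 128 (and FLAC) are rejected
# as premium-only by the handler above.
dl = requests.post(f"{BASE}/track_dl",
                   json={"track_id": match["track_id"], "quality": "128"},
                   timeout=120).json()
print(dl.get("url"))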