Chrunos committed
Commit 25289f5 · verified · 1 Parent(s): 5251503

Update app.py

Files changed (1)
  1. app.py +260 -216
app.py CHANGED
@@ -1,6 +1,6 @@
 import base64
 import logging
-from typing import Optional, Dict
+from typing import Optional, Dict, Any
 from fastapi import FastAPI, HTTPException, Request
 import requests
 from bs4 import BeautifulSoup
@@ -13,9 +13,6 @@ import asyncio
 from typing import Optional, Dict, Tuple
 import urllib.parse
 from fastapi.responses import JSONResponse
-import re
-from ytmusicapi import YTMusic
-
 
 # Configure logging
 logging.basicConfig(
@@ -28,11 +25,16 @@ logging.basicConfig(
 )
 logger = logging.getLogger(__name__)
 
+# Define Pydantic model for request body validation (optional but recommended)
+from pydantic import BaseModel, HttpUrl
+
+class TrackDlRequest(BaseModel):
+    spotify_url: str  # Keep as str for flexibility, could use HttpUrl if strict validation needed
+    album_cover_url: Optional[str] = None  # Make cover optional
+
 app = FastAPI(title="Spotify Track API",
               description="API for retrieving Spotify track information and download URLs")
 
-ytmusic = YTMusic()
-
 # Constants
 SPOTIFY_API_URL = "https://api.spotify.com/v1"
 SPOTIFY_CLIENT_ID = os.getenv("SPOTIFY_CLIENT_ID")
@@ -109,25 +111,6 @@ def get_spotify_token() -> str:
         logger.error(f"Unexpected error during token request: {str(e)}")
         raise HTTPException(status_code=500, detail="Internal server error")
 
-async def get_download_url(url):
-    api_url = "https://chrunos-ytdl2.hf.space/download"
-    data = {"url": url}
-    headers = {
-        "Content-Type": "application/json"
-    }
-    try:
-        response = requests.post(api_url, json=data, headers=headers)
-        if response.status_code == 200:
-            result = response.json()
-            return result.get('download_url')
-        else:
-            logger.error(f"Request failed, status code: {response.status_code}")
-            return None
-    except requests.RequestException as e:
-        logger.error(f"Client-side error occurred: {e}")
-        return None
-
-
 def extract_album_id(album_url: str) -> str:
     """Extract album ID from Spotify URL."""
     try:
@@ -135,39 +118,6 @@ def extract_album_id(album_url: str) -> str:
     except Exception as e:
         logger.error(f"Failed to extract album ID from URL {album_url}: {str(e)}")
         raise HTTPException(status_code=400, detail="Invalid Spotify album URL format")
-
-
-async def fetch_spotify_track_info(track_id: str) -> Dict:
-    """
-    Asynchronously fetch Spotify track title, artist, and cover art URL.
-    """
-    token = get_spotify_token()
-    headers = {
-        'Authorization': f'Bearer {token}'
-    }
-    url = f"{SPOTIFY_API_URL}/tracks/{track_id}"
-    try:
-        loop = asyncio.get_running_loop()
-        response = await loop.run_in_executor(None, requests.get, url, {'headers': headers})
-        if response.status_code == 200:
-            track_data = response.json()
-            title = track_data.get('name')
-            artists = [artist.get('name') for artist in track_data.get('artists', [])]
-            cover_art_url = track_data.get('album', {}).get('images', [{}])[0].get('url')
-
-            return {
-                'title': title,
-                'artists': artists,
-                'cover_art_url': cover_art_url
-            }
-        else:
-            raise SpotifyAPIError(f"Failed to fetch track information: {response.text}")
-    except requests.exceptions.RequestException as e:
-        logger.error(f"Network error during track information request: {str(e)}")
-        raise HTTPException(status_code=503, detail="Spotify API service unavailable")
-    except Exception as e:
-        logger.error(f"Unexpected error during track information request: {str(e)}")
-        raise HTTPException(status_code=500, detail="Internal server error")
 
 
 @app.post("/album")
@@ -265,192 +215,286 @@ def extract_track_id(track_url: str) -> str:
 
 
-
-
-def get_cookie():
-    headers = {
-        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36'
-    }
-
-    try:
-        session = requests.Session()
-        response = session.get('https://spotisongdownloader.to/', headers=headers)
-        response.raise_for_status()
-        cookies = session.cookies.get_dict()
-        return f"PHPSESSID={cookies['PHPSESSID']}; quality=m4a"
-
-    except requests.exceptions.RequestException:
-        return None
-
-
-def get_api():
-    headers = {
-        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36'
-    }
-
-    try:
-        response = requests.get('https://spotisongdownloader.to/track.php', headers=headers)
-        response.raise_for_status()
-
-        match = re.search(r'url:\s*"(/api/composer/spotify/[^"]+)"', response.text)
-        if match:
-            api_endpoint = match.group(1)
-            return f"https://spotisongdownloader.to{api_endpoint}"
-
-    except requests.exceptions.RequestException:
-        return None
-
-
-def get_data(track_id):
-    link = f"https://open.spotify.com/track/{track_id}"
-    try:
-        response = requests.get(
-            'https://spotisongdownloader.to/api/composer/spotify/xsingle_track.php',
-            params={'url': link}
-        )
-        return response.json()
-
-    except:
-        return None
-
-
-def get_url(track_data, cookie):
-    url = get_api()
-    if not url:
-        return None
-
-    payload = {
-        'song_name': track_data['song_name'],
-        'artist_name': track_data['artist'],
-        'url': track_data['url']
-    }
-
-    headers = {
-        'Accept': 'application/json, text/javascript, */*; q=0.01',
-        'Cookie': cookie,
-        'Origin': 'https://spotisongdownloader.to',
-        'Referer': 'https://spotisongdownloader.to/track.php',
-        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36'
-    }
-
-    try:
-        response = requests.post(url, data=payload, headers=headers)
-        response.raise_for_status()
-        download_data = response.json()
-
-        encoded_link = urllib.parse.quote(download_data['dlink'], safe=':/?=')
-        return encoded_link
-
-    except:
-        return None
-
-'''
-@app.get("/{track_id}")
-async def download_track(track_id: str):
-    cookie = get_cookie()
-    if not cookie:
-        return {"error": "Failed to get session cookie"}, 500
-
-    track_data = get_data(track_id)
-    if not track_data:
-        return {"error": "Failed to get track data"}, 404
-
-    download_link = get_url(track_data, cookie)
-    if not download_link:
-        return {"error": "Failed to get download URL"}, 500
-
-    return {"url": download_link}
-'''
-
-@app.get("/{track_id}")
-async def download_track(track_id: str):
-    url = f'https://open.spotify.com/track/{track_id}'
-    track_data = get_song_link_info(url)
-    if not track_data:
-        track_data = fetch_spotify_track_info(track_id)
-        title = track_data["title"]
-        artist = track_data["artist"]
-        query = f'{title}+{artist}'
-        logger.info(f"search query: {query}")
-        search_results = ytmusic.search(query, filter="songs")
-        first_song = next((song for song in search_results if 'videoId' in song and song['videoId']), {}) if search_results else {}
-        if 'videoId' in first_song:
-            videoId = first_song["videoId"]
-            ym_url = f'https://www.youtube.com/watch?v={videoId}'
-            d_data = await get_download_url(ym_url)
-            track_data['download_url'] = d_data
-            return track_data
-    else:
-        yt_url = track_data['url']
-        logger.info(yt_url)
-        d_data = await get_download_url(yt_url)
-        logger.info(d_data)
-        track_data['download_url'] = d_data
-        return track_data
-
-
-# Function to extract Tidal or YouTube URL
-def extract_url(links_by_platform: dict, platform: str):
-    if platform in links_by_platform:
-        return links_by_platform[platform]["url"]
-    return None
-
-# Function to get track info from Song.link API
-def get_song_link_info(url: str):
-    # Check if the URL is from Amazon Music
-    if "music.amazon.com" in url:
-        track_id = extract_amazon_track_id(url)
-        if track_id:
-            # Use the working format for Amazon Music tracks
-            api_url = f"https://api.song.link/v1-alpha.1/links?type=song&platform=amazonMusic&id={track_id}&userCountry=US"
-        else:
-            # If no track ID is found, use the original URL
-            api_url = f"https://api.song.link/v1-alpha.1/links?url={url}&userCountry=US"
-    else:
-        # For non-Amazon Music URLs, use the standard format
-        api_url = f"https://api.song.link/v1-alpha.1/links?url={url}&userCountry=US"
-
-    # Make the API call
-    response = requests.get(api_url)
-    if response.status_code == 200:
-        try:
-            track_info = response.json()
-            if not track_info:
-                raise HTTPException(status_code=404, detail="Could not fetch track info")
-            entityUniqueId = track_info["entityUniqueId"]
-            title = track_info["entitiesByUniqueId"][entityUniqueId]["title"]
-            artist = track_info["entitiesByUniqueId"][entityUniqueId]["artistName"]
-            filename = f"{title} - {artist}"
-
-            # extract YouTube URL
-            youtube_url = extract_url(track_info["linksByPlatform"], "youtube")
-            if youtube_url:
-                if title and artist:
-                    filename = f"{title} - {artist}"
-                    return {"url": youtube_url, "filename": filename}
-                else:
-                    return {"url": youtube_url, "filename": "Unknown Track - Unknown Artist"}
-            else:
-                return None
-        except ValueError:
-            raise HTTPException(status_code=500, detail="Error parsing API response as JSON")
-    else:
-        return None
-
-
-'''
-    else:
-        query = f'{title}+{artist}'
-        logger.info(f"search query: {query}")
-        search_results = ytmusic.search(query, filter="songs")
-        first_song = next((song for song in search_results if 'videoId' in song and song['videoId']), {}) if search_results else {}
-        if 'videoId' in first_song:
-            videoId = first_song["videoId"]
-            ym_url = f'https://www.youtube.com/watch?v={videoId}'
-            return {"filename": filename, "url": ym_url, "track_id": videoId}
-'''
+# --- MODIFIED Function to get CSRF token and Session from Spowload ---
+def get_spowload_session_and_token() -> Optional[Tuple[requests.Session, str]]:
+    """
+    Creates a requests session, fetches the spowload.com homepage,
+    extracts the CSRF token, and returns both the session and the token.
+
+    Returns:
+        A tuple containing the (requests.Session, csrf_token_string) if successful,
+        otherwise None.
+    """
+    spowload_url = "https://spowload.com"  # Use https for security
+    headers = {
+        # Mimic a common browser user-agent
+        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
+        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
+        'Accept-Language': 'en-US,en;q=0.9',
+        'Connection': 'keep-alive',
+    }
+    # Create a session object to persist cookies
+    session = requests.Session()
+    session.headers.update(headers)  # Set default headers for the session
+
+    try:
+        logger.info(f"Attempting to fetch CSRF token and session cookies from {spowload_url}")
+        # Use the session to make the GET request
+        response = session.get(spowload_url, timeout=15)  # Use session.get
+        response.raise_for_status()  # Raise an exception for bad status codes (4xx or 5xx)
+
+        # Parse the HTML content
+        soup = BeautifulSoup(response.text, 'html.parser')
+
+        # Find the meta tag with name="csrf-token"
+        meta_tag = soup.find('meta', attrs={'name': 'csrf-token'})
+
+        if meta_tag and 'content' in meta_tag.attrs:
+            csrf_token = meta_tag['content']
+            logger.info(f"Successfully extracted CSRF token and established session.")
+            # Return the session object AND the token
+            return session, csrf_token
+        else:
+            logger.warning(f"Could not find meta tag with name='csrf-token' or 'content' attribute at {spowload_url}")
+            return None
+
+    except requests.exceptions.Timeout:
+        logger.error(f"Request timed out while trying to reach {spowload_url}")
+        return None
+    except requests.exceptions.RequestException as e:
+        logger.error(f"Error fetching {spowload_url}: {str(e)}")
+        return None
+    except Exception as e:
+        # Catch potential BeautifulSoup errors or other unexpected issues
+        logger.exception(f"An unexpected error occurred while getting CSRF token/session: {str(e)}")
+        return None
+
+
+# --- /track ENDPOINT (from previous version, CSRF call removed for clarity) ---
+@app.post("/track", response_model=Dict)
+async def get_track_data(request: Request):
+    """
+    Retrieves specific track information from Spotify based on a track URL.
+
+    Expects JSON body: {"track_url": "spotify-track-url"}
+
+    Returns:
+        JSON response with track details:
+        {
+            "id": str,
+            "title": str,
+            "artists": list[str],
+            "album_cover_url": str | None,
+            "duration_ms": int,
+            "spotify_url": str
+        }
+        or an HTTP error response.
+    """
+    try:
+        # 1. Get data from request
+        try:
+            data = await request.json()
+            track_url = data.get('track_url')
+        except Exception:
+            logger.error("Failed to parse request JSON body.")
+            raise HTTPException(status_code=400, detail="Invalid JSON body.")
+
+        if not track_url:
+            logger.warning("Request received without 'track_url'.")
+            raise HTTPException(status_code=400, detail="Missing 'track_url' in JSON data.")
+
+        # 2. Extract Track ID
+        track_id = extract_track_id(track_url)
+        if not track_id:
+            logger.warning(f"Failed to extract track ID from URL: {track_url}")
+            raise HTTPException(status_code=400, detail="Invalid Spotify track URL format or unable to extract ID.")
+
+        logger.info(f"Processing request for track ID: {track_id}")
+
+        # 3. Get Spotify Token
+        try:
+            access_token = get_spotify_token()  # Use the existing function
+        except HTTPException as he:  # Propagate HTTP exceptions from token function
+            raise he
+        except Exception as e:
+            logger.error(f"Unexpected error getting Spotify token: {str(e)}")
+            raise HTTPException(status_code=500, detail="Internal error obtaining Spotify access token.")
+
+        # 4. Call Spotify API for Track Info
+        headers = {
+            'Authorization': f'Bearer {access_token}'
+        }
+        # Ensure SPOTIFY_API_URL is the correct base URL (e.g., "https://api.spotify.com/v1")
+        track_api_url = f"{SPOTIFY_API_URL}/tracks/{track_id}"
+        logger.info(f"Requesting track data from Spotify API: {track_api_url}")
+
+        try:
+            response = requests.get(track_api_url, headers=headers, timeout=15)  # Increased timeout slightly
+
+            # 5. Handle Potential Token Expiry (Retry logic)
+            if response.status_code == 401:
+                logger.warning("Spotify token likely expired or invalid (received 401). Requesting new one and retrying.")
+                try:
+                    access_token = get_spotify_token()  # Force refresh
+                except HTTPException as he:
+                    raise he  # Propagate HTTP exceptions from token function
+                except Exception as e:
+                    logger.error(f"Unexpected error getting fresh Spotify token during retry: {str(e)}")
+                    raise HTTPException(status_code=500, detail="Internal error obtaining fresh Spotify access token.")
+
+                headers['Authorization'] = f'Bearer {access_token}'  # Update header
+                response = requests.get(track_api_url, headers=headers, timeout=15)  # Retry the request
+
+            # 6. Handle API Errors after potential retry
+            if response.status_code != 200:
+                error_detail = f"Spotify API request failed. Status: {response.status_code}, URL: {track_api_url}, Response: {response.text[:200]}..."  # Limit response length in log
+                logger.error(error_detail)
+                # Map Spotify errors to appropriate HTTP status codes
+                if response.status_code == 400:
+                    raise HTTPException(status_code=400, detail=f"Bad request to Spotify API (check track ID format?).")
+                elif response.status_code == 404:
+                    raise HTTPException(status_code=404, detail=f"Track ID '{track_id}' not found on Spotify.")
+                else:
+                    # Use 502 Bad Gateway for upstream errors
+                    raise HTTPException(status_code=502, detail=f"Failed to retrieve data from Spotify (Status: {response.status_code}).")
+
+            # 7. Process and Format Response
+            track_data = response.json()
+
+            # Extract desired information safely using .get() with defaults
+            artists = [artist.get("name") for artist in track_data.get("artists", []) if artist.get("name")]
+            album_images = track_data.get("album", {}).get("images", [])
+            cover_url = None
+            if len(album_images) > 1:
+                cover_url = album_images[1].get("url")  # Prefer medium image (index 1)
+            elif len(album_images) > 0:
+                cover_url = album_images[0].get("url")  # Fallback to largest (index 0)
+
+            track_info = {
+                "id": track_data.get("id"),
+                "title": track_data.get("name"),
+                "artists": artists,
+                "album_cover_url": cover_url,
+                "duration_ms": track_data.get("duration_ms"),
+                "spotify_url": track_data.get("external_urls", {}).get("spotify")
+                # Removed spowload_csrf_token from this endpoint's response
+            }
+
+            logger.info(f"Successfully retrieved data for track ID: {track_id}")
+            return JSONResponse(content=track_info)
+
+        except requests.exceptions.RequestException as e:
+            logger.error(f"Network error contacting Spotify API at {track_api_url}: {str(e)}")
+            raise HTTPException(status_code=504, detail=f"Gateway timeout or network error contacting Spotify.")
+        except Exception as e:  # Catch potential JSON parsing errors or other issues
+            logger.exception(f"Error processing Spotify response or formatting data for track {track_id}: {str(e)}")
+            raise HTTPException(status_code=500, detail="Internal server error processing track data.")
+
+    # 8. General Exception Handling (Catchall)
+    except HTTPException as e:
+        # Re-raise FastAPI/manual HTTP exceptions so FastAPI handles them
+        raise e
+    except Exception as e:
+        # Log any unexpected errors that weren't caught above
+        logger.exception(f"An unexpected critical error occurred in /track endpoint: {str(e)}")  # Log full traceback
+        raise HTTPException(status_code=500, detail="An unexpected internal server error occurred.")
+
+
+# --- MODIFIED /track_dl ENDPOINT ---
+@app.post("/track_dl", response_model=Dict[str, Any])  # Use Dict[str, Any] for flexible response
+async def get_track_download_info(payload: TrackDlRequest):
+    """
+    Attempts to get download information for a Spotify track via spowload.com,
+    using a persistent session for CSRF handling.
+
+    Expects JSON body: {"spotify_url": "...", "album_cover_url": "..."}
+
+    Returns:
+        The JSON response from spowload.com/convert if successful,
+        otherwise an HTTP error response.
+    """
+    logger.info(f"Received request for /track_dl for URL: {payload.spotify_url}")
+
+    # 1. Get Session and CSRF Token from Spowload
+    session_data = get_spowload_session_and_token()
+    if not session_data:
+        logger.error("Failed to retrieve session and CSRF token from spowload.com.")
+        raise HTTPException(status_code=503, detail="Could not get necessary session/token from the download service.")
+
+    # Unpack the session and token
+    spowload_session, csrf_token = session_data
+
+    # 2. Prepare request for spowload.com/convert
+    convert_url = "https://spowload.com/convert"
+    # Headers are now mostly set on the session, but we need to add the CSRF token
+    # and ensure Content-Type is set for this specific POST request.
+    headers = {
+        'Content-Type': 'application/json',
+        'X-CSRF-Token': csrf_token,
+        'Accept': 'application/json, text/plain, */*',  # Override default session Accept for API call
+        'Referer': 'https://spowload.com/',  # Keep Referer
+        'Origin': 'https://spowload.com',  # Keep Origin
+    }
+    # Construct the body exactly as specified
+    body = {
+        "urls": payload.spotify_url,
+        "cover": payload.album_cover_url  # Use the provided cover URL
+    }
+
+    logger.info(f"Sending request to {convert_url} for Spotify URL: {payload.spotify_url} using established session.")
+
+    # 3. Make the POST request to spowload.com/convert USING THE SESSION
+    try:
+        # Use the session object obtained earlier to make the POST request
+        # It will automatically send cookies associated with the session
+        response = spowload_session.post(convert_url, headers=headers, json=body, timeout=30)  # Use session.post
+        response.raise_for_status()  # Check for 4xx/5xx errors
+
+        # 4. Process the response
+        try:
+            result = response.json()
+            logger.info(f"Successfully received response from {convert_url}.")
+            # Add basic check if the result seems valid (depends on spowload's response structure)
+            if isinstance(result, dict) and result.get("success"):  # Example check
+                logger.info("Spowload response indicates success.")
+            elif isinstance(result, dict):
+                logger.warning(f"Spowload response received but may indicate failure: {result}")
+            return JSONResponse(content=result)
+        except json.JSONDecodeError:
+            logger.error(f"Failed to decode JSON response from {convert_url}. Response text: {response.text[:200]}...")
+            raise HTTPException(status_code=502, detail="Received invalid response format from the download service.")
+
+    except requests.exceptions.Timeout:
+        logger.error(f"Request timed out while contacting {convert_url}")
+        raise HTTPException(status_code=504, detail="Download service timed out.")
+    except requests.exceptions.HTTPError as e:
+        # Log specific HTTP errors from spowload
+        # Check for 419 specifically now
+        if e.response.status_code == 419:
+            logger.error(f"Received 419 Page Expired error from {convert_url}. CSRF token or session likely invalid despite using session.")
+            raise HTTPException(status_code=419, detail="Download service reported 'Page Expired' (CSRF/Session issue).")
+        logger.error(f"HTTP error {e.response.status_code} received from {convert_url}. Response: {e.response.text[:200]}...")
+        # Pass a more specific error back to the client if possible
+        if e.response.status_code == 429:  # Too Many Requests
+            raise HTTPException(status_code=429, detail="Rate limited by the download service. Try again later.")
+        elif e.response.status_code == 403:  # Forbidden
+            raise HTTPException(status_code=403, detail="Request forbidden by the download service.")
+        else:
+            raise HTTPException(status_code=502, detail=f"Download service returned an error (Status: {e.response.status_code}).")
+    except requests.exceptions.RequestException as e:
+        logger.error(f"Network error contacting {convert_url}: {str(e)}")
+        raise HTTPException(status_code=502, detail="Network error communicating with the download service.")
+    except Exception as e:
+        logger.exception(f"An unexpected error occurred during /track_dl processing: {str(e)}")
+        raise HTTPException(status_code=500, detail="An unexpected internal server error occurred.")
+    finally:
+        # Close the session to release resources (optional but good practice)
+        if 'spowload_session' in locals():
+            spowload_session.close()
 
 
 @app.get("/")
 async def health_check():
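
For reference, a minimal client sketch for the two endpoints this commit adds. The base URL, port, and example track URL below are assumptions (not part of the commit); point them at wherever the Space is actually served.

import requests

BASE_URL = "http://localhost:7860"  # assumed local dev address; replace with the deployed Space URL

# /track expects {"track_url": ...} and returns id, title, artists, album_cover_url, duration_ms, spotify_url.
track = requests.post(
    f"{BASE_URL}/track",
    json={"track_url": "https://open.spotify.com/track/3n3Ppam7vgaVa1iaRUc9Lp"},  # example track URL
    timeout=30,
).json()

# /track_dl expects the TrackDlRequest body {"spotify_url": ..., "album_cover_url": ...}
# and relays the spowload.com/convert JSON response back to the caller.
dl = requests.post(
    f"{BASE_URL}/track_dl",
    json={"spotify_url": track["spotify_url"], "album_cover_url": track["album_cover_url"]},
    timeout=60,
)
print(dl.status_code, dl.json())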