Update app.py
app.py CHANGED
@@ -9,6 +9,8 @@ import asyncio
 import cloudscraper
 from pydantic import BaseModel
 from urllib.parse import urlparse, parse_qs
+from collections import defaultdict
+import threading
 
 app = Flask(__name__)
 ytmusic = YTMusic()
@@ -162,7 +164,7 @@ api_rotator = ApiRotator([
 
 
 
-async def get_track_download_url(track_id: str) -> str:
+async def get_track_download_url(track_id: str, quality: str) -> str:
     apis = api_rotator.get_prioritized_apis()
     session = cloudscraper.create_scraper()  # Requires cloudscraper package
     headers = {
@@ -178,7 +180,7 @@ async def get_track_download_url(track_id: str) -> str:
         response = session.post(
             api_url,
             timeout=20,
-            json={"url": y_url, "audioFormat": "mp3", "downloadMode": "audio"},
+            json={"url": y_url, "audioFormat": "mp3", "downloadMode": "audio", "audioBitrate": quality},
            headers=headers
         )
         logger.info(f"Response status: {response.status_code}")
@@ -206,18 +208,93 @@ async def get_track_download_url(track_id: str) -> str:
 
 
 
+
+
+
+
+# In-memory storage for request counting
+request_counts = defaultdict(list)
+request_lock = threading.Lock()
+DAILY_LIMIT = 20
+
+def cleanup_old_records():
+    """Remove records older than 24 hours"""
+    today = datetime.now()
+    with request_lock:
+        for ip in list(request_counts.keys()):
+            request_counts[ip] = [
+                (timestamp, count)
+                for timestamp, count in request_counts[ip]
+                if (today - timestamp).days < 1
+            ]
+            if not request_counts[ip]:
+                del request_counts[ip]
+
+def get_daily_requests(ip):
+    """Get the number of requests made today by this IP"""
+    cleanup_old_records()
+    today = datetime.now()
+    with request_lock:
+        return sum(
+            count for timestamp, count in request_counts[ip]
+            if (today - timestamp).days < 1
+        )
+
+def increment_daily_requests(ip):
+    """Increment the request counter for this IP"""
+    now = datetime.now()
+    with request_lock:
+        request_counts[ip].append((now, 1))
+
 @app.route('/track_dl', methods=['POST'])
 async def track_dl():
-
-    dl_url = await get_track_download_url(track_id)
-    if dl_url and "http" in dl_url:
-        result = {"url": dl_url}
-        return jsonify(result)
-    else:
-        return {
-            "error": "Failed to Fetch the Track."
-        }
+    client_ip = request.remote_addr
 
+    # Check current usage
+    current_requests = get_daily_requests(client_ip)
+    remaining_requests = DAILY_LIMIT - current_requests
+
+    if remaining_requests <= 0:
+        return jsonify({
+            "error": "Daily limit of 20 downloads exceeded. Get Premium or try again tomorrow.",
+            "remaining_requests": 0
+        }), 429
+
+    data = request.get_json()
+    track_id = data.get('track_id')
+    quality = data.get('quality', '128')
+
+    try:
+        # Check FLAC before int(): int("FLAC") would raise ValueError and skip this branch
+        if quality.upper() == 'FLAC' or int(quality) > 128:
+            return jsonify({
+                "error": "Quality above 128 or FLAC is for Premium users Only.",
+                "remaining_requests": remaining_requests
+            }), 400
+
+        dl_url = await get_track_download_url(track_id, quality)
+
+        if dl_url and "http" in dl_url:
+            # Increment counter only on successful download
+            increment_daily_requests(client_ip)
+            remaining_requests -= 1  # Decrease remaining count after successful increment
+
+            result = {
+                "url": dl_url,
+                "remaining_requests": remaining_requests
+            }
+            return jsonify(result)
+        else:
+            return jsonify({
+                "error": "Failed to Fetch the Track.",
+                "remaining_requests": remaining_requests
+            }), 400
+
+    except ValueError:
+        return jsonify({
+            "error": "Invalid quality value provided. It should be a valid integer or FLAC.",
+            "remaining_requests": remaining_requests
+        }), 400
 
 
 
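For context, here is a minimal client-side sketch of how the updated /track_dl endpoint could be called and how the new remaining_requests field and the 429 limit response might be handled. The base URL and the track ID are placeholders, not values taken from this commit.

import requests  # illustrative client code, assumes the requests package

BASE_URL = "http://localhost:7860"  # placeholder host/port, not defined by this commit

resp = requests.post(
    f"{BASE_URL}/track_dl",
    json={"track_id": "<youtube_track_id>", "quality": "128"},  # fields read by track_dl()
    timeout=30,
)

if resp.status_code == 200:
    body = resp.json()
    print("download url:", body["url"])
    print("remaining today:", body["remaining_requests"])
elif resp.status_code == 429:
    print("daily limit reached:", resp.json()["error"])
else:  # 400: premium-only quality, invalid quality value, or failed fetch
    print("request failed:", resp.json().get("error"))

Note that the counter is keyed on request.remote_addr and only incremented after a successful fetch, so failed downloads do not consume quota.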
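The rate-limit helpers keep roughly a 24-hour sliding window via (today - timestamp).days < 1, and they assume datetime is already imported earlier in app.py (the import is not part of this diff). Below is a small standalone sketch of the same bookkeeping; the names counts, record, and used_today are illustrative and do not appear in app.py.

from collections import defaultdict
from datetime import datetime, timedelta
import threading

# Standalone sketch of the sliding-window counting used above (illustrative names).
counts = defaultdict(list)
lock = threading.Lock()

def record(ip, when=None):
    """Append one request for this IP, optionally back-dated for the demo."""
    with lock:
        counts[ip].append((when or datetime.now(), 1))

def used_today(ip):
    """Sum requests whose timestamp is less than a full day old."""
    now = datetime.now()
    with lock:
        return sum(c for t, c in counts[ip] if (now - t).days < 1)

record("203.0.113.7", datetime.now() - timedelta(hours=25))  # older than 24 h: ignored
record("203.0.113.7")                                        # recent: counted
print(used_today("203.0.113.7"))  # -> 1

Because the real counters are plain in-process structures (as the "# In-memory storage" comment notes), they reset whenever the app restarts.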