import os
import cloudscraper
from fastapi import FastAPI, HTTPException, Request, Response
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import HTMLResponse, StreamingResponse
from typing import Optional
import uvicorn
import asyncio
from urllib.parse import urlparse
import time
app = FastAPI(
title="ScraperProxy",
description="一个使用CloudScraper进行请求转发的代理,支持流式响应",
version="0.1.0"
)
# Add CORS middleware so the proxy can be called from any origin
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
async def stream_generator(response):
    """Generator that yields the upstream response body chunk by chunk."""
    for chunk in response.iter_content(chunk_size=8192):
        if chunk:
            yield chunk
            await asyncio.sleep(0.001)  # yield control to the event loop to stay responsive
# Return the landing-page HTML template
def get_html_template():
    # The HTML could be loaded from a file; for simplicity it is
    # returned here as an inline string.
    html_content = """<!DOCTYPE html>
<html lang="zh-CN">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>ScraperProxy API - Web Request Proxy Service</title>
<style>
:root {
--primary-color: #3498db;
--secondary-color: #2980b9;
--accent-color: #e74c3c;
--text-color: #333;
--light-bg: #f5f7fa;
--card-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
}
* {
margin: 0;
padding: 0;
box-sizing: border-box;
}
body {
font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
line-height: 1.6;
color: var(--text-color);
background: linear-gradient(135deg, #f5f7fa 0%, #c3cfe2 100%);
min-height: 100vh;
padding: 20px;
}
.container {
max-width: 1200px;
margin: 0 auto;
padding: 20px;
}
header {
text-align: center;
margin-bottom: 40px;
padding: 20px;
background-color: white;
border-radius: 10px;
box-shadow: var(--card-shadow);
}
h1 {
color: var(--primary-color);
margin-bottom: 10px;
font-size: 2.5rem;
}
.subtitle {
font-size: 1.2rem;
color: #666;
margin-bottom: 20px;
}
.features {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(300px, 1fr));
gap: 20px;
margin-bottom: 40px;
}
.feature-card {
background-color: white;
padding: 25px;
border-radius: 10px;
box-shadow: var(--card-shadow);
transition: transform 0.3s ease;
}
.feature-card:hover {
transform: translateY(-5px);
}
.feature-card h3 {
color: var(--primary-color);
margin-bottom: 15px;
font-size: 1.4rem;
}
.code-section {
background-color: white;
padding: 30px;
border-radius: 10px;
box-shadow: var(--card-shadow);
margin-bottom: 40px;
}
.code-block {
background-color: #282c34;
color: #abb2bf;
padding: 20px;
border-radius: 6px;
overflow-x: auto;
font-family: 'Courier New', Courier, monospace;
margin: 15px 0;
white-space: pre-wrap;
}
.code-title {
margin-bottom: 15px;
color: var(--primary-color);
font-size: 1.3rem;
}
.button {
display: inline-block;
background-color: var(--primary-color);
color: white;
padding: 12px 24px;
border-radius: 6px;
text-decoration: none;
font-weight: bold;
transition: background-color 0.3s ease;
margin: 10px 5px;
}
.button:hover {
background-color: var(--secondary-color);
}
.button.accent {
background-color: var(--accent-color);
}
.button.accent:hover {
background-color: #c0392b;
}
footer {
text-align: center;
margin-top: 40px;
padding: 20px;
color: #666;
}
.try-it-section {
background-color: white;
padding: 30px;
border-radius: 10px;
box-shadow: var(--card-shadow);
margin-bottom: 40px;
}
.input-group {
margin-bottom: 20px;
}
.input-group label {
display: block;
margin-bottom: 8px;
font-weight: bold;
}
.input-group input[type="text"] {
width: 100%;
padding: 12px;
border: 1px solid #ddd;
border-radius: 6px;
font-size: 16px;
}
.checkbox-group {
margin: 15px 0;
}
#response-container {
background-color: #f5f5f5;
padding: 20px;
border-radius: 6px;
min-height: 100px;
margin-top: 20px;
white-space: pre-wrap;
display: none;
}
</style>
</head>
<body>
<div class="container">
<header>
<h1>ScraperProxy API</h1>
<p class="subtitle">强大的网页请求代理服务,轻松绕过访问限制</p>
<div>
<a href="/docs" class="button">API 文档</a>
<a href="#try-it" class="button accent">立即尝试</a>
</div>
</header>
<div class="features">
<div class="feature-card">
<h3>绕过访问限制</h3>
<p>使用 cloudscraper 技术,轻松绕过常见的网站防护机制,如 Cloudflare 的反爬虫保护。</p>
</div>
<div class="feature-card">
<h3>支持流式响应</h3>
<p>通过流式响应处理大型数据,保持连接稳定,实现更高效的数据传输。</p>
</div>
<div class="feature-card">
<h3>简单易用</h3>
<p>简洁的 API 设计,只需一个 URL 参数即可使用,支持多种请求方法和自定义选项。</p>
</div>
</div>
<div class="code-section">
<h2 class="code-title">快速开始</h2>
<p>使用我们的代理服务非常简单,只需发送请求到以下端点:</p>
<div class="code-block">
# 基本用法
GET /proxy?url=https://example.com
# 启用流式响应
GET /proxy?url=https://example.com&stream=true
# 自定义请求方法和头信息
POST /proxy
{
"url": "https://example.com",
"method": "POST",
"headers": {"Custom-Header": "Value"},
"data": {"key": "value"},
"stream": true
}
</div>
</div>
<div class="try-it-section" id="try-it">
<h2 class="code-title">立即尝试</h2>
<div class="input-group">
<label for="url-input">输入要请求的 URL:</label>
<input type="text" id="url-input" placeholder="https://example.com" value="https://example.com">
</div>
<div class="checkbox-group">
<input type="checkbox" id="stream-checkbox" checked>
<label for="stream-checkbox">启用流式响应</label>
</div>
<button id="send-request" class="button">发送请求</button>
<div id="response-container"></div>
</div>
</div>
<footer>
            <p>© 2025 ScraperProxy API. All rights reserved.</p>
</footer>
<script>
document.getElementById('send-request').addEventListener('click', async function() {
const url = document.getElementById('url-input').value;
const streamEnabled = document.getElementById('stream-checkbox').checked;
const responseContainer = document.getElementById('response-container');
            if (!url) {
                alert('Please enter a valid URL');
                return;
            }
            responseContainer.style.display = 'block';
            responseContainer.textContent = 'Loading...';
try {
const proxyUrl = `/proxy?url=${encodeURIComponent(url)}&stream=${streamEnabled}`;
if (streamEnabled) {
responseContainer.textContent = '';
const response = await fetch(proxyUrl);
                    const reader = response.body.getReader();
                    // Reuse one decoder in streaming mode so multi-byte UTF-8
                    // characters split across chunks are decoded correctly
                    const decoder = new TextDecoder();
                    while (true) {
                        const { done, value } = await reader.read();
                        if (done) break;
                        responseContainer.textContent += decoder.decode(value, { stream: true });
                    }
} else {
const response = await fetch(proxyUrl);
const data = await response.text();
responseContainer.textContent = data;
}
} catch (error) {
                responseContainer.textContent = `Error: ${error.message}`;
}
});
</script>
</body>
</html>
"""
return html_content
@app.get("/", response_class=HTMLResponse)
async def root():
    return get_html_template()
@app.api_route("/proxy", methods=["GET", "POST", "PUT", "DELETE", "OPTIONS", "HEAD", "PATCH"])
async def proxy(request: Request):
"""
通用代理端点,转发所有请求到目标URL,支持流式响应
"""
try:
# 获取环境变量中的token
env_token = os.environ.get('TOKEN')
if env_token:
# 从请求头获取Authorization
auth_header = request.headers.get('Authorization')
if not auth_header or not auth_header.startswith('Bearer '):
raise HTTPException(
status_code=401,
detail="未提供有效的Authorization header",
headers={"WWW-Authenticate": "Bearer"}
)
# 提取Bearer token
token = auth_header.split(' ')[1]
# 验证token
if token != env_token:
raise HTTPException(
status_code=403,
detail="Token无效"
)
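        # Example of a passing request (hypothetical token value): with
        # TOKEN=mysecret in the environment, clients must send the header
        #   Authorization: Bearer mysecret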
        # Resolve the request method and target URL
        method = request.method
        target_url = request.query_params.get("url")
        if not target_url:
            raise HTTPException(status_code=400, detail="A target URL must be provided")
        # Check whether a streaming response was requested
        stream_request = request.query_params.get("stream", "").lower() in ("true", "1", "yes")
        # Create a cloudscraper instance; the optional browser/captcha
        # settings are kept here, commented out, for reference
        scraper = cloudscraper.create_scraper(
            # browser={
            #     'browser': 'chrome',
            #     'platform': 'windows',
            #     'mobile': False
            # },
            # captcha={
            #     'provider': '2captcha',
            #     'api_key': ' '
            # },
            # debug=True,
            delay=10  # seconds cloudscraper waits when answering a challenge page
        )
        # Copy cookies from the incoming request into the scraper session
        cookies = request.cookies
        for key, value in cookies.items():
            scraper.cookies.set(key, value)
        # If a PROXY environment variable is set, route all traffic through it
        proxy = os.environ.get('PROXY')
        if proxy:
            scraper.proxies = {
                'http': proxy,
                'https': proxy
            }
            # To verify the proxy is in effect:
            # response = scraper.get('https://httpbin.org/ip')
            # print(response.text)
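        # PROXY is assumed to use the standard requests proxy URL format,
        # e.g. "http://user:pass@host:port" (illustrative value, not from the source)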
        # Determine the site's home URL (used to warm up the Cloudflare session)
        home_url = request.query_params.get("home")
        if not home_url:
            # Derive the home URL from the target URL
            parsed_url = urlparse(target_url)
            home_url = f"{parsed_url.scheme}://{parsed_url.netloc}/"
        # Fetch the home page with retries so cloudscraper can solve any challenge
        max_retries = 3
        retry_delay = 1  # seconds between retries
        home_response = None
        for attempt in range(max_retries):
            try:
                home_response = scraper.get(home_url, headers={
                    "sec-fetch-dest": "document"
                })
                print(f"Home page {home_url} response (attempt {attempt + 1}): {home_response.status_code}")
                if home_response.status_code == 200:
                    break
                if attempt < max_retries - 1:  # not the last attempt
                    time.sleep(retry_delay)
            except Exception as e:
                print(f"Home page request failed (attempt {attempt + 1}): {str(e)}")
                if attempt < max_retries - 1:
                    time.sleep(retry_delay)
        # Read the incoming request body
        body = await request.body()
        # Copy the query parameters, dropping the proxy-specific ones
        params = dict(request.query_params)
        params.pop("url", None)
        params.pop("stream", None)
        params.pop("home", None)  # proxy-specific, like url/stream; not forwarded upstream
        # Copy the original request headers, then strip hop-by-hop and
        # platform-injected headers that would break the upstream request
        headers = dict(request.headers)
        for name in (
            "host", "authorization", "cookie",
            "x-forwarded-for", "x-forwarded-proto", "x-forwarded-port",
            "x-amzn-trace-id", "x-request-id", "x-ip-token", "x-direct-url",
            "accept", "accept-language", "accept-encoding",
            "content-type", "content-length", "user-agent",
        ):
            headers.pop(name, None)
        print(f"Remaining request headers after cleanup: {headers}")
        # Build the upstream request; note that only a minimal header set is
        # sent, mirroring the warm-up request (the cleaned headers above are
        # only logged, not forwarded)
        request_kwargs = {
            "url": target_url,
            "headers": {"sec-fetch-dest": "document"},
            "params": params,
            "stream": stream_request  # stream the upstream body if requested
        }
        # Attach the request body, if any
        if body:
            request_kwargs["data"] = body
        # Dispatch the request with the matching HTTP method
        if method == "GET":
            response = scraper.get(**request_kwargs)
        elif method == "POST":
            response = scraper.post(**request_kwargs)
        elif method == "PUT":
            response = scraper.put(**request_kwargs)
        elif method == "DELETE":
            response = scraper.delete(**request_kwargs)
        elif method == "HEAD":
            response = scraper.head(**request_kwargs)
        elif method == "OPTIONS":
            response = scraper.options(**request_kwargs)
        elif method == "PATCH":
            response = scraper.patch(**request_kwargs)
        else:
            raise HTTPException(status_code=405, detail=f"Unsupported method: {method}")
        # Streaming response path
        if stream_request:
            # Forward upstream headers, dropping ones that conflict with
            # re-chunked streaming output
            headers_dict = {}
            for header_name, header_value in response.headers.items():
                if header_name.lower() not in ('content-encoding', 'transfer-encoding', 'content-length'):
                    headers_dict[header_name] = header_value
            return StreamingResponse(
                stream_generator(response),
                status_code=response.status_code,
                headers=headers_dict,
                media_type=response.headers.get("content-type", "application/octet-stream")
            )
        else:
            # Buffered response path
            proxy_response = Response(
                content=response.content,
                status_code=response.status_code,
            )
            # Forward upstream response headers
            for header_name, header_value in response.headers.items():
                if header_name.lower() not in ('content-encoding', 'transfer-encoding', 'content-length'):
                    proxy_response.headers[header_name] = header_value
            # Forward cookies set by the upstream server
            for cookie_name, cookie_value in response.cookies.items():
                proxy_response.set_cookie(key=cookie_name, value=cookie_value)
            return proxy_response
    except HTTPException:
        # Let deliberate HTTP errors (400/401/403/405) propagate unchanged
        # instead of being collapsed into a 500 below
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Proxy request failed: {str(e)}")
if __name__ == "__main__":
uvicorn.run("main:app", host="0.0.0.0", port=7860, reload=True)
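# A minimal client-side usage sketch (assumptions: the server is running
# locally on port 7860 as configured above, and TOKEN is unset; if TOKEN is
# set, add an "Authorization: Bearer <token>" header):
#
#   import requests
#
#   resp = requests.get(
#       "http://localhost:7860/proxy",
#       params={"url": "https://example.com", "stream": "true"},
#       stream=True,
#   )
#   for chunk in resp.iter_content(chunk_size=8192):
#       print(chunk.decode(errors="replace"), end="")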