zhepama committed on
Commit
9ec01a2
·
verified ·
1 Parent(s): 340ced9

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +24 -10
main.py CHANGED
@@ -44,18 +44,19 @@ async def proxy(request: Request, path: str, target_url: Optional[str] = None):
44
  if not target_url:
45
  raise HTTPException(status_code=400, detail="必须提供目标URL")
46
 
47
- # 获取原始请求头
48
- headers = dict(request.headers)
49
- # 移除可能导致问题的头
50
- headers.pop("host", None)
51
- headers.pop("content-length", None)
52
-
53
  # 检查是否请求流式响应
54
  stream_request = "stream" in request.query_params and request.query_params["stream"].lower() in ["true", "1", "yes"]
55
 
56
  # 创建带有代理的 scraper
57
  # 创建cloudscraper实例
58
  scraper = cloudscraper.create_scraper()
 
 
 
 
 
 
59
  # 检查环境变量PROXY是否存在
60
  proxy = os.environ.get('PROXY')
61
  if proxy:
@@ -68,7 +69,7 @@ async def proxy(request: Request, path: str, target_url: Optional[str] = None):
68
  response = scraper.get('https://httpbin.org/ip')
69
  print(response.text)
70
 
71
- print(f"{headers}")
72
 
73
  # 获取请求体
74
  body = await request.body()
@@ -79,12 +80,25 @@ async def proxy(request: Request, path: str, target_url: Optional[str] = None):
79
  params.pop("url", None)
80
  params.pop("stream", None)
81
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
82
  # 构建请求参数
83
  request_kwargs = {
84
  "url": target_url,
85
- "headers": {
86
- "sec-fetch-dest": "document",
87
- },
88
  "params": params,
89
  "stream": stream_request # 设置stream参数
90
  }
 
44
  if not target_url:
45
  raise HTTPException(status_code=400, detail="必须提供目标URL")
46
 
47
+
 
 
 
 
 
48
  # 检查是否请求流式响应
49
  stream_request = "stream" in request.query_params and request.query_params["stream"].lower() in ["true", "1", "yes"]
50
 
51
  # 创建带有代理的 scraper
52
  # 创建cloudscraper实例
53
  scraper = cloudscraper.create_scraper()
54
+
55
+ # 从请求中获取cookies并设置到scraper
56
+ cookies = request.cookies
57
+ for key, value in cookies.items():
58
+ scraper.cookies.set(key, value)
59
+
60
  # 检查环境变量PROXY是否存在
61
  proxy = os.environ.get('PROXY')
62
  if proxy:
 
69
  response = scraper.get('https://httpbin.org/ip')
70
  print(response.text)
71
 
72
+
73
 
74
  # 获取请求体
75
  body = await request.body()
 
80
  params.pop("url", None)
81
  params.pop("stream", None)
82
 
83
+
84
+ # 获取原始请求头
85
+ headers = dict(request.headers)
86
+ # 移除可能导致问题的头
87
+ headers.pop("host", None)
88
+ headers.pop("content-length", None)
89
+ headers.pop("cookie", None)
90
+ headers.pop("x-forwarded-for", None)
91
+ headers.pop("x-forwarded-proto", None)
92
+ headers.pop("x-forwarded-port", None)
93
+ headers.pop("x-amzn-trace-id", None)
94
+ headers.pop("x-request-id", None)
95
+ headers.pop("x-ip-token", None)
96
+ headers.pop("x-direct-url", None)
97
+ print(f"{headers}")
98
  # 构建请求参数
99
  request_kwargs = {
100
  "url": target_url,
101
+ "headers": headers,
 
 
102
  "params": params,
103
  "stream": stream_request # 设置stream参数
104
  }