sanbo
commited on
Commit
·
08ca036
1
Parent(s):
5e28307
update sth. at 2025-01-20 14:08:58
Browse files- .dockerignore +9 -0
- .env.example +4 -0
- .github/workflows/build_docker_dev.yml +70 -0
- .github/workflows/build_docker_main.yml +70 -0
- .gitignore +8 -0
- Dockerfile +11 -0
- LICENSE +21 -0
- README——xxx.md +217 -0
- api/chat2api.py +124 -0
- api/files.py +144 -0
- api/models.py +28 -0
- api/tokens.py +86 -0
- app.py +57 -0
- chatgpt/ChatService.py +530 -0
- chatgpt/authorization.py +78 -0
- chatgpt/chatFormat.py +436 -0
- chatgpt/chatLimit.py +34 -0
- chatgpt/fp.py +61 -0
- chatgpt/proofofWork.py +504 -0
- chatgpt/refreshToken.py +57 -0
- chatgpt/turnstile.py +268 -0
- chatgpt/wssClient.py +36 -0
- docker-compose-warp.yml +56 -0
- docker-compose.yml +22 -0
- gateway/admin.py +0 -0
- gateway/backend.py +381 -0
- gateway/chatgpt.py +32 -0
- gateway/gpts.py +24 -0
- gateway/login.py +10 -0
- gateway/reverseProxy.py +293 -0
- gateway/route.py +0 -0
- gateway/share.py +251 -0
- gateway/v1.py +33 -0
- requirements.txt +13 -0
- templates/chatgpt.html +385 -0
- templates/chatgpt_context.json +0 -0
- templates/gpts_context.json +0 -0
- templates/login.html +82 -0
- templates/tokens.html +82 -0
- utils/Client.py +56 -0
- utils/Logger.py +24 -0
- utils/configs.py +106 -0
- utils/globals.py +107 -0
- utils/kv_utils.py +10 -0
- utils/retry.py +32 -0
- version.txt +1 -0
.dockerignore
ADDED
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
.env
|
2 |
+
*.pyc
|
3 |
+
/.git/
|
4 |
+
/.idea/
|
5 |
+
/docs/
|
6 |
+
/tmp/
|
7 |
+
/data/
|
8 |
+
/.venv/
|
9 |
+
/.vscode/
|
.env.example
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
API_PREFIX=your_prefix
|
2 |
+
CHATGPT_BASE_URL=https://chatgpt.com
|
3 |
+
PROXY_URL=your_first_proxy, your_second_proxy
|
4 |
+
SCHEDULED_REFRESH=false
|
.github/workflows/build_docker_dev.yml
ADDED
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
name: Build Docker Image (dev)
|
2 |
+
|
3 |
+
on:
|
4 |
+
push:
|
5 |
+
branches:
|
6 |
+
- dev
|
7 |
+
paths-ignore:
|
8 |
+
- 'README.md'
|
9 |
+
- 'docker-compose.yml'
|
10 |
+
- 'docker-compose-warp.yml'
|
11 |
+
- 'docs/**'
|
12 |
+
- '.github/workflows/build_docker_main.yml'
|
13 |
+
- '.github/workflows/build_docker_dev.yml'
|
14 |
+
workflow_dispatch:
|
15 |
+
|
16 |
+
jobs:
|
17 |
+
main:
|
18 |
+
runs-on: ubuntu-latest
|
19 |
+
|
20 |
+
steps:
|
21 |
+
- name: Check out the repository
|
22 |
+
uses: actions/checkout@v2
|
23 |
+
|
24 |
+
- name: Read the version from version.txt
|
25 |
+
id: get_version
|
26 |
+
run: |
|
27 |
+
version=$(cat version.txt)
|
28 |
+
echo "Current version: v$version-dev"
|
29 |
+
echo "::set-output name=version::v$version-dev"
|
30 |
+
|
31 |
+
- name: Commit and push version tag
|
32 |
+
env:
|
33 |
+
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
34 |
+
run: |
|
35 |
+
version=${{ steps.get_version.outputs.version }}
|
36 |
+
git config --local user.email "[email protected]"
|
37 |
+
git config --local user.name "GitHub Action"
|
38 |
+
git tag "$version"
|
39 |
+
git push https://x-access-token:${GHCR_PAT}@github.com/lanqian528/chat2api.git "$version"
|
40 |
+
|
41 |
+
- name: Set up QEMU
|
42 |
+
uses: docker/setup-qemu-action@v3
|
43 |
+
|
44 |
+
- name: Set up Docker Buildx
|
45 |
+
uses: docker/setup-buildx-action@v3
|
46 |
+
|
47 |
+
- name: Log in to Docker Hub
|
48 |
+
uses: docker/login-action@v3
|
49 |
+
with:
|
50 |
+
username: ${{ secrets.DOCKER_USERNAME }}
|
51 |
+
password: ${{ secrets.DOCKER_PASSWORD }}
|
52 |
+
|
53 |
+
- name: Docker meta
|
54 |
+
id: meta
|
55 |
+
uses: docker/metadata-action@v5
|
56 |
+
with:
|
57 |
+
images: lanqian528/chat2api
|
58 |
+
tags: |
|
59 |
+
type=raw,value=latest-dev
|
60 |
+
type=raw,value=${{ steps.get_version.outputs.version }}
|
61 |
+
|
62 |
+
- name: Build and push
|
63 |
+
uses: docker/build-push-action@v5
|
64 |
+
with:
|
65 |
+
context: .
|
66 |
+
platforms: linux/amd64,linux/arm64
|
67 |
+
file: Dockerfile
|
68 |
+
push: true
|
69 |
+
tags: ${{ steps.meta.outputs.tags }}
|
70 |
+
labels: ${{ steps.meta.outputs.labels }}
|
.github/workflows/build_docker_main.yml
ADDED
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
name: Build Docker Image
|
2 |
+
|
3 |
+
on:
|
4 |
+
push:
|
5 |
+
branches:
|
6 |
+
- main
|
7 |
+
paths-ignore:
|
8 |
+
- 'README.md'
|
9 |
+
- 'docker-compose.yml'
|
10 |
+
- 'docker-compose-warp.yml'
|
11 |
+
- 'docs/**'
|
12 |
+
- '.github/workflows/build_docker_main.yml'
|
13 |
+
- '.github/workflows/build_docker_dev.yml'
|
14 |
+
workflow_dispatch:
|
15 |
+
|
16 |
+
jobs:
|
17 |
+
main:
|
18 |
+
runs-on: ubuntu-latest
|
19 |
+
|
20 |
+
steps:
|
21 |
+
- name: Check out the repository
|
22 |
+
uses: actions/checkout@v2
|
23 |
+
|
24 |
+
- name: Read the version from version.txt
|
25 |
+
id: get_version
|
26 |
+
run: |
|
27 |
+
version=$(cat version.txt)
|
28 |
+
echo "Current version: v$version"
|
29 |
+
echo "::set-output name=version::v$version"
|
30 |
+
|
31 |
+
- name: Commit and push version tag
|
32 |
+
env:
|
33 |
+
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
34 |
+
run: |
|
35 |
+
version=${{ steps.get_version.outputs.version }}
|
36 |
+
git config --local user.email "[email protected]"
|
37 |
+
git config --local user.name "GitHub Action"
|
38 |
+
git tag "$version"
|
39 |
+
git push https://x-access-token:${GHCR_PAT}@github.com/lanqian528/chat2api.git "$version"
|
40 |
+
|
41 |
+
- name: Set up QEMU
|
42 |
+
uses: docker/setup-qemu-action@v3
|
43 |
+
|
44 |
+
- name: Set up Docker Buildx
|
45 |
+
uses: docker/setup-buildx-action@v3
|
46 |
+
|
47 |
+
- name: Log in to Docker Hub
|
48 |
+
uses: docker/login-action@v3
|
49 |
+
with:
|
50 |
+
username: ${{ secrets.DOCKER_USERNAME }}
|
51 |
+
password: ${{ secrets.DOCKER_PASSWORD }}
|
52 |
+
|
53 |
+
- name: Docker meta
|
54 |
+
id: meta
|
55 |
+
uses: docker/metadata-action@v5
|
56 |
+
with:
|
57 |
+
images: lanqian528/chat2api
|
58 |
+
tags: |
|
59 |
+
type=raw,value=latest,enable={{is_default_branch}}
|
60 |
+
type=raw,value=${{ steps.get_version.outputs.version }}
|
61 |
+
|
62 |
+
- name: Build and push
|
63 |
+
uses: docker/build-push-action@v5
|
64 |
+
with:
|
65 |
+
context: .
|
66 |
+
platforms: linux/amd64,linux/arm64
|
67 |
+
file: Dockerfile
|
68 |
+
push: true
|
69 |
+
tags: ${{ steps.meta.outputs.tags }}
|
70 |
+
labels: ${{ steps.meta.outputs.labels }}
|
.gitignore
ADDED
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
.env
|
2 |
+
*.pyc
|
3 |
+
/.git/
|
4 |
+
/.idea/
|
5 |
+
/tmp/
|
6 |
+
/data/
|
7 |
+
/.venv/
|
8 |
+
/.vscode/
|
Dockerfile
ADDED
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
FROM python:3.11-slim
|
2 |
+
|
3 |
+
WORKDIR /app
|
4 |
+
|
5 |
+
COPY . /app
|
6 |
+
|
7 |
+
RUN pip install --no-cache-dir -r requirements.txt
|
8 |
+
|
9 |
+
EXPOSE 5005
|
10 |
+
|
11 |
+
CMD ["python", "app.py"]
|
LICENSE
ADDED
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
MIT License
|
2 |
+
|
3 |
+
Copyright (c) 2024 aurora-develop
|
4 |
+
|
5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
6 |
+
of this software and associated documentation files (the "Software"), to deal
|
7 |
+
in the Software without restriction, including without limitation the rights
|
8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
9 |
+
copies of the Software, and to permit persons to whom the Software is
|
10 |
+
furnished to do so, subject to the following conditions:
|
11 |
+
|
12 |
+
The above copyright notice and this permission notice shall be included in all
|
13 |
+
copies or substantial portions of the Software.
|
14 |
+
|
15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
21 |
+
SOFTWARE.
|
README——xxx.md
ADDED
@@ -0,0 +1,217 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# CHAT2API
|
2 |
+
|
3 |
+
🤖 一个简单的 ChatGPT TO API 代理
|
4 |
+
|
5 |
+
🌟 无需账号即可使用免费、无限的 `GPT-3.5`
|
6 |
+
|
7 |
+
💥 支持 AccessToken 使用账号,支持 `O1-Preview/mini`、`GPT-4`、`GPT-4o/mini`、 `GPTs`
|
8 |
+
|
9 |
+
🔍 回复格式与真实 API 完全一致,适配几乎所有客户端
|
10 |
+
|
11 |
+
👮 配套用户管理端[Chat-Share](https://github.com/h88782481/Chat-Share)使用前需提前配置好环境变量(ENABLE_GATEWAY设置为True,AUTO_SEED设置为False)
|
12 |
+
|
13 |
+
|
14 |
+
## 交流群
|
15 |
+
|
16 |
+
[https://t.me/chat2api](https://t.me/chat2api)
|
17 |
+
|
18 |
+
要提问请先阅读完仓库文档,尤其是常见问题部分。
|
19 |
+
|
20 |
+
提问时请提供:
|
21 |
+
|
22 |
+
1. 启动日志截图(敏感信息打码,包括环境变量和版本号)
|
23 |
+
2. 报错的日志信息(敏感信息打码)
|
24 |
+
3. 接口返回的状态码和响应体
|
25 |
+
|
26 |
+
## 赞助商
|
27 |
+
|
28 |
+
感谢 Capsolver 对本项目的赞助,对于市面上任何人机验证码,你可以使用 [Capsolver](https://www.capsolver.com/zh?utm_source=github&utm_medium=repo&utm_campaign=scraping&utm_term=chat2api) 来解决
|
29 |
+
|
30 |
+
[](https://www.capsolver.com/zh?utm_source=github&utm_medium=repo&utm_campaign=scraping&utm_term=chat2api)
|
31 |
+
|
32 |
+
## 功能
|
33 |
+
|
34 |
+
### 最新版本号存于 `version.txt`
|
35 |
+
|
36 |
+
### 逆向API 功能
|
37 |
+
> - [x] 流式、非流式传输
|
38 |
+
> - [x] 免登录 GPT-3.5 对话
|
39 |
+
> - [x] GPT-3.5 模型对话(传入模型名不包含 gpt-4,则默认使用 gpt-3.5,也就是 text-davinci-002-render-sha)
|
40 |
+
> - [x] GPT-4 系列模型对话(传入模型名包含: gpt-4,gpt-4o,gpt-4o-mini,gpt-4-moblie 即可使用对应模型,需传入 AccessToken)
|
41 |
+
> - [x] O1 系列模型对话(传入模型名包含 o1-preview,o1-mini 即可使用对应模型,需传入 AccessToken)
|
42 |
+
> - [x] GPT-4 模型画图、代码、联网
|
43 |
+
> - [x] 支持 GPTs(传入模型名:gpt-4-gizmo-g-*)
|
44 |
+
> - [x] 支持 Team Plus 账号(需传入 team account id)
|
45 |
+
> - [x] 上传图片、文件(格式为 API 对应格式,支持 URL 和 base64)
|
46 |
+
> - [x] 可作为网关使用,可多机分布部署
|
47 |
+
> - [x] 多账号轮询,同时支持 `AccessToken` 和 `RefreshToken`
|
48 |
+
> - [x] 请求失败重试,自动轮询下一个 Token
|
49 |
+
> - [x] Tokens 管理,支持上传、清除
|
50 |
+
> - [x] 定时使用 `RefreshToken` 刷新 `AccessToken` / 每次启动将会全部非强制刷新一次,每4天晚上3点全部强制刷新一次。
|
51 |
+
> - [x] 支持文件下载,需要开启历史记录
|
52 |
+
> - [x] 支持 `O1-Preview/mini` 模型推理过程输出
|
53 |
+
|
54 |
+
### 官网镜像 功能
|
55 |
+
> - [x] 支持官网原生镜像
|
56 |
+
> - [x] 后台账号池随机抽取,`Seed` 设置随机账号
|
57 |
+
> - [x] 输入 `RefreshToken` 或 `AccessToken` 直接登录使用
|
58 |
+
> - [x] 支持 O1-Preview/mini、GPT-4、GPT-4o/mini
|
59 |
+
> - [x] 敏感信息接口禁用、部分设置接口禁用
|
60 |
+
> - [x] /login 登录页面,注销后自动跳转到登录页面
|
61 |
+
> - [x] /?token=xxx 直接登录, xxx 为 `RefreshToken` 或 `AccessToken` 或 `SeedToken` (随机种子)
|
62 |
+
|
63 |
+
|
64 |
+
> TODO
|
65 |
+
> - [ ] 镜像支持 `GPTs`
|
66 |
+
> - [ ] 暂无,欢迎提 `issue`
|
67 |
+
|
68 |
+
## 逆向API
|
69 |
+
|
70 |
+
完全 `OpenAI` 格式的 API ,支持传入 `AccessToken` 或 `RefreshToken`,可用 GPT-4, GPT-4o, GPTs, O1-Preview, O1-Mini:
|
71 |
+
|
72 |
+
```bash
|
73 |
+
curl --location 'http://127.0.0.1:5005/v1/chat/completions' \
|
74 |
+
--header 'Content-Type: application/json' \
|
75 |
+
--header 'Authorization: Bearer {{Token}}' \
|
76 |
+
--data '{
|
77 |
+
"model": "gpt-3.5-turbo",
|
78 |
+
"messages": [{"role": "user", "content": "Say this is a test!"}],
|
79 |
+
"stream": true
|
80 |
+
}'
|
81 |
+
```
|
82 |
+
|
83 |
+
将你账号的 `AccessToken` 或 `RefreshToken` 作为 `{{ Token }}` 传入。
|
84 |
+
也可填写你设置的环境变量 `Authorization` 的值, 将会随机选择后台账号
|
85 |
+
|
86 |
+
如果有team账号,可以传入 `ChatGPT-Account-ID`,使用 Team 工作区:
|
87 |
+
|
88 |
+
- 传入方式一:
|
89 |
+
`headers` 中传入 `ChatGPT-Account-ID`值
|
90 |
+
|
91 |
+
- 传入方式二:
|
92 |
+
`Authorization: Bearer <AccessToken 或 RefreshToken>,<ChatGPT-Account-ID>`
|
93 |
+
|
94 |
+
如果设置了 `AUTHORIZATION` 环境变量,可以将设置的值作为 `{{ Token }}` 传入进行多 Tokens 轮询。
|
95 |
+
|
96 |
+
> - `AccessToken` 获取: chatgpt官网登录后,再打开 [https://chatgpt.com/api/auth/session](https://chatgpt.com/api/auth/session) 获取 `accessToken` 这个值。
|
97 |
+
> - `RefreshToken` 获取: 此处不提供获取方法。
|
98 |
+
> - 免登录 gpt-3.5 无需传入 Token。
|
99 |
+
|
100 |
+
## Tokens 管理
|
101 |
+
|
102 |
+
1. 配置环境变量 `AUTHORIZATION` 作为 `授权码` ,然后运行程序。
|
103 |
+
|
104 |
+
2. 访问 `/tokens` 或者 `/{api_prefix}/tokens` 可以查看现有 Tokens 数量,也可以上传新的 Tokens ,或者清空 Tokens。
|
105 |
+
|
106 |
+
3. 请求时传入 `AUTHORIZATION` 中配置的 `授权码` 即可使用轮询的Tokens进行对话
|
107 |
+
|
108 |
+

|
109 |
+
|
110 |
+
## 官网原生镜像
|
111 |
+
|
112 |
+
1. 配置环境变量 `ENABLE_GATEWAY` 为 `true`,然后运行程序, 注意开启后别人也可以直接通过域名访问你的网关。
|
113 |
+
|
114 |
+
2. 在 Tokens 管理页面上传 `RefreshToken` 或 `AccessToken`
|
115 |
+
|
116 |
+
3. 访问 `/login` 到登录页面
|
117 |
+
|
118 |
+

|
119 |
+
|
120 |
+
4. 进入官网原生镜像页面使用
|
121 |
+
|
122 |
+

|
123 |
+
|
124 |
+
## 环境变量
|
125 |
+
|
126 |
+
每个环境变量都有默认值,如果不懂环境变量的含义,请不要设置,更不要传空值,字符串无需引号。
|
127 |
+
|
128 |
+
| 分类 | 变量名 | 示例值 | 默认值 | 描述 |
|
129 |
+
|------|-------------------|-------------------------------------------------------------|-----------------------|--------------------------------------------------------------|
|
130 |
+
| 安全相关 | API_PREFIX | `your_prefix` | `None` | API 前缀密码,不设置容易被人访问,设置后需请求 `/your_prefix/v1/chat/completions` |
|
131 |
+
| | AUTHORIZATION | `your_first_authorization`,<br/>`your_second_authorization` | `[]` | 你自己为使用多账号轮询 Tokens 设置的授权码,英文逗号分隔 |
|
132 |
+
| | AUTH_KEY | `your_auth_key` | `None` | 私人网关需要加`auth_key`请求头才设置该项 |
|
133 |
+
| 请求相关 | CHATGPT_BASE_URL | `https://chatgpt.com` | `https://chatgpt.com` | ChatGPT 网关地址,设置后会改变请求的网站,多个网关用逗号分隔 |
|
134 |
+
| | PROXY_URL | `http://ip:port`,<br/>`http://username:password@ip:port` | `[]` | 全局代理 URL,出 403 时启用,多个代理用逗号分隔 |
|
135 |
+
| | EXPORT_PROXY_URL | `http://ip:port`或<br/>`http://username:password@ip:port` | `None` | 出口代理 URL,防止请求图片和文件时泄漏源站 ip |
|
136 |
+
| 功能相关 | HISTORY_DISABLED | `true` | `true` | 是否不保存聊天记录并返回 conversation_id |
|
137 |
+
| | POW_DIFFICULTY | `00003a` | `00003a` | 要解决的工作量证明难度,不懂别设置 |
|
138 |
+
| | RETRY_TIMES | `3` | `3` | 出错重试次数,使用 `AUTHORIZATION` 会自动随机/轮询下一个账号 |
|
139 |
+
| | CONVERSATION_ONLY | `false` | `false` | 是否直接使用对话接口,如果你用的网关支持自动解决 `POW` 才启用 |
|
140 |
+
| | ENABLE_LIMIT | `true` | `true` | 开启后不尝试突破官方次数限制,尽可能防止封号 |
|
141 |
+
| | UPLOAD_BY_URL | `false` | `false` | 开启后按照 `URL+空格+正文` 进行对话,自动解析 URL 内容并上传,多个 URL 用空格分隔 |
|
142 |
+
| | SCHEDULED_REFRESH | `false` | `false` | 是否定时刷新 `AccessToken` ,开启后每次启动程序将会全部非强制刷新一次,每4天晚上3点全部强制刷新一次。 |
|
143 |
+
| | RANDOM_TOKEN | `true` | `true` | 是否随机选取后台 `Token` ,开启后随机后台账号,关闭后为顺序轮询 |
|
144 |
+
| 网关功能 | ENABLE_GATEWAY | `false` | `false` | 是否启用网关模式,开启后可以使用镜像站,但也将会不设防 |
|
145 |
+
| | AUTO_SEED | `false` | `true` | 是否启用随机账号模式,默认启用,输入`seed`后随机匹配后台`Token`。关闭之后需要手动对接接口,来进行`Token`管控。 |
|
146 |
+
|
147 |
+
## 部署
|
148 |
+
|
149 |
+
### Zeabur 部署
|
150 |
+
|
151 |
+
[](https://zeabur.com/templates/6HEGIZ?referralCode=LanQian528)
|
152 |
+
|
153 |
+
### 直接部署
|
154 |
+
|
155 |
+
```bash
|
156 |
+
git clone https://github.com/LanQian528/chat2api
|
157 |
+
cd chat2api
|
158 |
+
pip install -r requirements.txt
|
159 |
+
python app.py
|
160 |
+
```
|
161 |
+
|
162 |
+
### Docker 部署
|
163 |
+
|
164 |
+
您需要安装 Docker 和 Docker Compose。
|
165 |
+
|
166 |
+
```bash
|
167 |
+
docker run -d \
|
168 |
+
--name chat2api \
|
169 |
+
-p 5005:5005 \
|
170 |
+
lanqian528/chat2api:latest
|
171 |
+
```
|
172 |
+
|
173 |
+
### (推荐,可用 PLUS 账号) Docker Compose 部署
|
174 |
+
|
175 |
+
创建一个新的目录,例如 chat2api,���进入该目录:
|
176 |
+
|
177 |
+
```bash
|
178 |
+
mkdir chat2api
|
179 |
+
cd chat2api
|
180 |
+
```
|
181 |
+
|
182 |
+
在此目录中下载库中的 docker-compose.yml 文件:
|
183 |
+
|
184 |
+
```bash
|
185 |
+
wget https://raw.githubusercontent.com/LanQian528/chat2api/main/docker-compose-warp.yml
|
186 |
+
```
|
187 |
+
|
188 |
+
修改 docker-compose-warp.yml 文件中的环境变量,保存后:
|
189 |
+
|
190 |
+
```bash
|
191 |
+
docker-compose up -d
|
192 |
+
```
|
193 |
+
|
194 |
+
|
195 |
+
## 常见问题
|
196 |
+
|
197 |
+
> - 错误代码:
|
198 |
+
> - `401`:当前 IP 不支持免登录,请尝试更换 IP 地址,或者在环境变量 `PROXY_URL` 中设置代理,或者你的身份验证失败。
|
199 |
+
> - `403`:请在日志中查看具体报错信息。
|
200 |
+
> - `429`:当前 IP 请求1小时内请求超过限制,请稍后再试,或更换 IP。
|
201 |
+
> - `500`:服务器内部错误,请求失败。
|
202 |
+
> - `502`:服务器网关错误,或网络不可用,请尝试更换网络环境。
|
203 |
+
|
204 |
+
> - 已知情况:
|
205 |
+
> - 日本 IP 很多不支持免登,免登 GPT-3.5 建议使用美国 IP。
|
206 |
+
> - 99%的账号都支持免费 `GPT-4o` ,但根据 IP 地区开启,目前日本和新加坡 IP 已知开启概率较大。
|
207 |
+
|
208 |
+
> - 环境变量 `AUTHORIZATION` 是什么?
|
209 |
+
> - 是一个自己给 chat2api 设置的一个身份验证,设置后才可使用已保存的 Tokens 轮询,请求时当作 `APIKEY` 传入。
|
210 |
+
> - AccessToken 如何获取?
|
211 |
+
> - chatgpt官网登录后,再打开 [https://chatgpt.com/api/auth/session](https://chatgpt.com/api/auth/session) 获取 `accessToken` 这个值。
|
212 |
+
|
213 |
+
|
214 |
+
## License
|
215 |
+
|
216 |
+
MIT License
|
217 |
+
|
api/chat2api.py
ADDED
@@ -0,0 +1,124 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import asyncio
|
2 |
+
import types
|
3 |
+
|
4 |
+
from apscheduler.schedulers.asyncio import AsyncIOScheduler
|
5 |
+
from fastapi import Request, HTTPException, Form, Security
|
6 |
+
from fastapi.responses import HTMLResponse, StreamingResponse, JSONResponse
|
7 |
+
from fastapi.security import HTTPAuthorizationCredentials
|
8 |
+
from starlette.background import BackgroundTask
|
9 |
+
|
10 |
+
import utils.globals as globals
|
11 |
+
from app import app, templates, security_scheme
|
12 |
+
from chatgpt.ChatService import ChatService
|
13 |
+
from chatgpt.authorization import refresh_all_tokens
|
14 |
+
from utils.Logger import logger
|
15 |
+
from utils.configs import api_prefix, scheduled_refresh
|
16 |
+
from utils.retry import async_retry
|
17 |
+
|
18 |
+
scheduler = AsyncIOScheduler()
|
19 |
+
|
20 |
+
|
21 |
+
@app.on_event("startup")
|
22 |
+
async def app_start():
|
23 |
+
if scheduled_refresh:
|
24 |
+
scheduler.add_job(id='refresh', func=refresh_all_tokens, trigger='cron', hour=3, minute=0, day='*/2',
|
25 |
+
kwargs={'force_refresh': True})
|
26 |
+
scheduler.start()
|
27 |
+
asyncio.get_event_loop().call_later(0, lambda: asyncio.create_task(refresh_all_tokens(force_refresh=False)))
|
28 |
+
|
29 |
+
|
30 |
+
async def to_send_conversation(request_data, req_token):
|
31 |
+
chat_service = ChatService(req_token)
|
32 |
+
try:
|
33 |
+
await chat_service.set_dynamic_data(request_data)
|
34 |
+
await chat_service.get_chat_requirements()
|
35 |
+
return chat_service
|
36 |
+
except HTTPException as e:
|
37 |
+
await chat_service.close_client()
|
38 |
+
raise HTTPException(status_code=e.status_code, detail=e.detail)
|
39 |
+
except Exception as e:
|
40 |
+
await chat_service.close_client()
|
41 |
+
logger.error(f"Server error, {str(e)}")
|
42 |
+
raise HTTPException(status_code=500, detail="Server error")
|
43 |
+
|
44 |
+
|
45 |
+
async def process(request_data, req_token):
|
46 |
+
chat_service = await to_send_conversation(request_data, req_token)
|
47 |
+
await chat_service.prepare_send_conversation()
|
48 |
+
res = await chat_service.send_conversation()
|
49 |
+
return chat_service, res
|
50 |
+
|
51 |
+
|
52 |
+
@app.post(f"/{api_prefix}/v1/chat/completions" if api_prefix else "/v1/chat/completions")
|
53 |
+
async def send_conversation(request: Request, credentials: HTTPAuthorizationCredentials = Security(security_scheme)):
|
54 |
+
req_token = credentials.credentials
|
55 |
+
try:
|
56 |
+
request_data = await request.json()
|
57 |
+
except Exception:
|
58 |
+
raise HTTPException(status_code=400, detail={"error": "Invalid JSON body"})
|
59 |
+
chat_service, res = await async_retry(process, request_data, req_token)
|
60 |
+
try:
|
61 |
+
if isinstance(res, types.AsyncGeneratorType):
|
62 |
+
background = BackgroundTask(chat_service.close_client)
|
63 |
+
return StreamingResponse(res, media_type="text/event-stream", background=background)
|
64 |
+
else:
|
65 |
+
background = BackgroundTask(chat_service.close_client)
|
66 |
+
return JSONResponse(res, media_type="application/json", background=background)
|
67 |
+
except HTTPException as e:
|
68 |
+
await chat_service.close_client()
|
69 |
+
if e.status_code == 500:
|
70 |
+
logger.error(f"Server error, {str(e)}")
|
71 |
+
raise HTTPException(status_code=500, detail="Server error")
|
72 |
+
raise HTTPException(status_code=e.status_code, detail=e.detail)
|
73 |
+
except Exception as e:
|
74 |
+
await chat_service.close_client()
|
75 |
+
logger.error(f"Server error, {str(e)}")
|
76 |
+
raise HTTPException(status_code=500, detail="Server error")
|
77 |
+
|
78 |
+
|
79 |
+
@app.get(f"/{api_prefix}/tokens" if api_prefix else "/tokens", response_class=HTMLResponse)
|
80 |
+
async def upload_html(request: Request):
|
81 |
+
tokens_count = len(set(globals.token_list) - set(globals.error_token_list))
|
82 |
+
return templates.TemplateResponse("tokens.html",
|
83 |
+
{"request": request, "api_prefix": api_prefix, "tokens_count": tokens_count})
|
84 |
+
|
85 |
+
|
86 |
+
@app.post(f"/{api_prefix}/tokens/upload" if api_prefix else "/tokens/upload")
|
87 |
+
async def upload_post(text: str = Form(...)):
|
88 |
+
lines = text.split("\n")
|
89 |
+
for line in lines:
|
90 |
+
if line.strip() and not line.startswith("#"):
|
91 |
+
globals.token_list.append(line.strip())
|
92 |
+
with open(globals.TOKENS_FILE, "a", encoding="utf-8") as f:
|
93 |
+
f.write(line.strip() + "\n")
|
94 |
+
logger.info(f"Token count: {len(globals.token_list)}, Error token count: {len(globals.error_token_list)}")
|
95 |
+
tokens_count = len(set(globals.token_list) - set(globals.error_token_list))
|
96 |
+
return {"status": "success", "tokens_count": tokens_count}
|
97 |
+
|
98 |
+
|
99 |
+
@app.post(f"/{api_prefix}/tokens/clear" if api_prefix else "/tokens/clear")
|
100 |
+
async def upload_post():
|
101 |
+
globals.token_list.clear()
|
102 |
+
globals.error_token_list.clear()
|
103 |
+
with open(globals.TOKENS_FILE, "w", encoding="utf-8") as f:
|
104 |
+
pass
|
105 |
+
logger.info(f"Token count: {len(globals.token_list)}, Error token count: {len(globals.error_token_list)}")
|
106 |
+
tokens_count = len(set(globals.token_list) - set(globals.error_token_list))
|
107 |
+
return {"status": "success", "tokens_count": tokens_count}
|
108 |
+
|
109 |
+
|
110 |
+
@app.post(f"/{api_prefix}/tokens/error" if api_prefix else "/tokens/error")
|
111 |
+
async def error_tokens():
|
112 |
+
error_tokens_list = list(set(globals.error_token_list))
|
113 |
+
return {"status": "success", "error_tokens": error_tokens_list}
|
114 |
+
|
115 |
+
|
116 |
+
@app.get(f"/{api_prefix}/tokens/add/{{token}}" if api_prefix else "/tokens/add/{token}")
|
117 |
+
async def add_token(token: str):
|
118 |
+
if token.strip() and not token.startswith("#"):
|
119 |
+
globals.token_list.append(token.strip())
|
120 |
+
with open(globals.TOKENS_FILE, "a", encoding="utf-8") as f:
|
121 |
+
f.write(token.strip() + "\n")
|
122 |
+
logger.info(f"Token count: {len(globals.token_list)}, Error token count: {len(globals.error_token_list)}")
|
123 |
+
tokens_count = len(set(globals.token_list) - set(globals.error_token_list))
|
124 |
+
return {"status": "success", "tokens_count": tokens_count}
|
api/files.py
ADDED
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import io
|
2 |
+
|
3 |
+
import pybase64
|
4 |
+
from PIL import Image
|
5 |
+
|
6 |
+
from utils.Client import Client
|
7 |
+
from utils.configs import export_proxy_url, cf_file_url
|
8 |
+
|
9 |
+
|
10 |
+
async def get_file_content(url):
|
11 |
+
if url.startswith("data:"):
|
12 |
+
mime_type, base64_data = url.split(';')[0].split(':')[1], url.split(',')[1]
|
13 |
+
file_content = pybase64.b64decode(base64_data)
|
14 |
+
return file_content, mime_type
|
15 |
+
else:
|
16 |
+
client = Client()
|
17 |
+
try:
|
18 |
+
if cf_file_url:
|
19 |
+
body = {"file_url": url}
|
20 |
+
r = await client.post(cf_file_url, timeout=60, json=body)
|
21 |
+
else:
|
22 |
+
r = await client.get(url, proxy=export_proxy_url, timeout=60)
|
23 |
+
if r.status_code != 200:
|
24 |
+
return None, None
|
25 |
+
file_content = r.content
|
26 |
+
mime_type = r.headers.get('Content-Type', '').split(';')[0].strip()
|
27 |
+
return file_content, mime_type
|
28 |
+
finally:
|
29 |
+
await client.close()
|
30 |
+
del client
|
31 |
+
|
32 |
+
|
33 |
+
async def determine_file_use_case(mime_type):
|
34 |
+
multimodal_types = ["image/jpeg", "image/webp", "image/png", "image/gif"]
|
35 |
+
my_files_types = ["text/x-php", "application/msword", "text/x-c", "text/html",
|
36 |
+
"application/vnd.openxmlformats-officedocument.wordprocessingml.document",
|
37 |
+
"application/json", "text/javascript", "application/pdf",
|
38 |
+
"text/x-java", "text/x-tex", "text/x-typescript", "text/x-sh",
|
39 |
+
"text/x-csharp", "application/vnd.openxmlformats-officedocument.presentationml.presentation",
|
40 |
+
"text/x-c++", "application/x-latext", "text/markdown", "text/plain",
|
41 |
+
"text/x-ruby", "text/x-script.python"]
|
42 |
+
|
43 |
+
if mime_type in multimodal_types:
|
44 |
+
return "multimodal"
|
45 |
+
elif mime_type in my_files_types:
|
46 |
+
return "my_files"
|
47 |
+
else:
|
48 |
+
return "ace_upload"
|
49 |
+
|
50 |
+
|
51 |
+
async def get_image_size(file_content):
|
52 |
+
with Image.open(io.BytesIO(file_content)) as img:
|
53 |
+
return img.width, img.height
|
54 |
+
|
55 |
+
|
56 |
+
async def get_file_extension(mime_type):
|
57 |
+
extension_mapping = {
|
58 |
+
"image/jpeg": ".jpg",
|
59 |
+
"image/png": ".png",
|
60 |
+
"image/gif": ".gif",
|
61 |
+
"image/webp": ".webp",
|
62 |
+
"text/x-php": ".php",
|
63 |
+
"application/msword": ".doc",
|
64 |
+
"text/x-c": ".c",
|
65 |
+
"text/html": ".html",
|
66 |
+
"application/vnd.openxmlformats-officedocument.wordprocessingml.document": ".docx",
|
67 |
+
"application/json": ".json",
|
68 |
+
"text/javascript": ".js",
|
69 |
+
"application/pdf": ".pdf",
|
70 |
+
"text/x-java": ".java",
|
71 |
+
"text/x-tex": ".tex",
|
72 |
+
"text/x-typescript": ".ts",
|
73 |
+
"text/x-sh": ".sh",
|
74 |
+
"text/x-csharp": ".cs",
|
75 |
+
"application/vnd.openxmlformats-officedocument.presentationml.presentation": ".pptx",
|
76 |
+
"text/x-c++": ".cpp",
|
77 |
+
"application/x-latex": ".latex",
|
78 |
+
"text/markdown": ".md",
|
79 |
+
"text/plain": ".txt",
|
80 |
+
"text/x-ruby": ".rb",
|
81 |
+
"text/x-script.python": ".py",
|
82 |
+
"application/zip": ".zip",
|
83 |
+
"application/x-zip-compressed": ".zip",
|
84 |
+
"application/x-tar": ".tar",
|
85 |
+
"application/x-compressed-tar": ".tar.gz",
|
86 |
+
"application/vnd.rar": ".rar",
|
87 |
+
"application/x-rar-compressed": ".rar",
|
88 |
+
"application/x-7z-compressed": ".7z",
|
89 |
+
"application/octet-stream": ".bin",
|
90 |
+
"audio/mpeg": ".mp3",
|
91 |
+
"audio/wav": ".wav",
|
92 |
+
"audio/ogg": ".ogg",
|
93 |
+
"audio/aac": ".aac",
|
94 |
+
"video/mp4": ".mp4",
|
95 |
+
"video/x-msvideo": ".avi",
|
96 |
+
"video/x-matroska": ".mkv",
|
97 |
+
"video/webm": ".webm",
|
98 |
+
"application/rtf": ".rtf",
|
99 |
+
"application/vnd.ms-excel": ".xls",
|
100 |
+
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": ".xlsx",
|
101 |
+
"text/css": ".css",
|
102 |
+
"text/xml": ".xml",
|
103 |
+
"application/xml": ".xml",
|
104 |
+
"application/vnd.android.package-archive": ".apk",
|
105 |
+
"application/vnd.apple.installer+xml": ".mpkg",
|
106 |
+
"application/x-bzip": ".bz",
|
107 |
+
"application/x-bzip2": ".bz2",
|
108 |
+
"application/x-csh": ".csh",
|
109 |
+
"application/x-debian-package": ".deb",
|
110 |
+
"application/x-dvi": ".dvi",
|
111 |
+
"application/java-archive": ".jar",
|
112 |
+
"application/x-java-jnlp-file": ".jnlp",
|
113 |
+
"application/vnd.mozilla.xul+xml": ".xul",
|
114 |
+
"application/vnd.ms-fontobject": ".eot",
|
115 |
+
"application/ogg": ".ogx",
|
116 |
+
"application/x-font-ttf": ".ttf",
|
117 |
+
"application/font-woff": ".woff",
|
118 |
+
"application/x-shockwave-flash": ".swf",
|
119 |
+
"application/vnd.visio": ".vsd",
|
120 |
+
"application/xhtml+xml": ".xhtml",
|
121 |
+
"application/vnd.ms-powerpoint": ".ppt",
|
122 |
+
"application/vnd.oasis.opendocument.text": ".odt",
|
123 |
+
"application/vnd.oasis.opendocument.spreadsheet": ".ods",
|
124 |
+
"application/x-xpinstall": ".xpi",
|
125 |
+
"application/vnd.google-earth.kml+xml": ".kml",
|
126 |
+
"application/vnd.google-earth.kmz": ".kmz",
|
127 |
+
"application/x-font-otf": ".otf",
|
128 |
+
"application/vnd.ms-excel.addin.macroEnabled.12": ".xlam",
|
129 |
+
"application/vnd.ms-excel.sheet.binary.macroEnabled.12": ".xlsb",
|
130 |
+
"application/vnd.ms-excel.template.macroEnabled.12": ".xltm",
|
131 |
+
"application/vnd.ms-powerpoint.addin.macroEnabled.12": ".ppam",
|
132 |
+
"application/vnd.ms-powerpoint.presentation.macroEnabled.12": ".pptm",
|
133 |
+
"application/vnd.ms-powerpoint.slideshow.macroEnabled.12": ".ppsm",
|
134 |
+
"application/vnd.ms-powerpoint.template.macroEnabled.12": ".potm",
|
135 |
+
"application/vnd.ms-word.document.macroEnabled.12": ".docm",
|
136 |
+
"application/vnd.ms-word.template.macroEnabled.12": ".dotm",
|
137 |
+
"application/x-ms-application": ".application",
|
138 |
+
"application/x-ms-wmd": ".wmd",
|
139 |
+
"application/x-ms-wmz": ".wmz",
|
140 |
+
"application/x-ms-xbap": ".xbap",
|
141 |
+
"application/vnd.ms-xpsdocument": ".xps",
|
142 |
+
"application/x-silverlight-app": ".xap"
|
143 |
+
}
|
144 |
+
return extension_mapping.get(mime_type, "")
|
api/models.py
ADDED
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
model_proxy = {
|
2 |
+
"gpt-3.5-turbo": "gpt-3.5-turbo-0125",
|
3 |
+
"gpt-3.5-turbo-16k": "gpt-3.5-turbo-16k-0613",
|
4 |
+
"gpt-4": "gpt-4-0613",
|
5 |
+
"gpt-4-32k": "gpt-4-32k-0613",
|
6 |
+
"gpt-4-turbo-preview": "gpt-4-0125-preview",
|
7 |
+
"gpt-4-vision-preview": "gpt-4-1106-vision-preview",
|
8 |
+
"gpt-4-turbo": "gpt-4-turbo-2024-04-09",
|
9 |
+
"gpt-4o": "gpt-4o-2024-08-06",
|
10 |
+
"gpt-4o-mini": "gpt-4o-mini-2024-07-18",
|
11 |
+
"o1-preview": "o1-preview-2024-09-12",
|
12 |
+
"o1-mini": "o1-mini-2024-09-12",
|
13 |
+
"claude-3-opus": "claude-3-opus-20240229",
|
14 |
+
"claude-3-sonnet": "claude-3-sonnet-20240229",
|
15 |
+
"claude-3-haiku": "claude-3-haiku-20240307",
|
16 |
+
}
|
17 |
+
|
18 |
+
model_system_fingerprint = {
|
19 |
+
"gpt-3.5-turbo-0125": ["fp_b28b39ffa8"],
|
20 |
+
"gpt-3.5-turbo-1106": ["fp_592ef5907d"],
|
21 |
+
"gpt-4-0125-preview": ["fp_f38f4d6482", "fp_2f57f81c11", "fp_a7daf7c51e", "fp_a865e8ede4", "fp_13c70b9f70",
|
22 |
+
"fp_b77cb481ed"],
|
23 |
+
"gpt-4-1106-preview": ["fp_e467c31c3d", "fp_d986a8d1ba", "fp_99a5a401bb", "fp_123d5a9f90", "fp_0d1affc7a6",
|
24 |
+
"fp_5c95a4634e"],
|
25 |
+
"gpt-4-turbo-2024-04-09": ["fp_d1bac968b4"],
|
26 |
+
"gpt-4o-2024-05-13": ["fp_3aa7262c27"],
|
27 |
+
"gpt-4o-mini-2024-07-18": ["fp_c9aa9c0491"]
|
28 |
+
}
|
api/tokens.py
ADDED
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import math
|
2 |
+
|
3 |
+
import tiktoken
|
4 |
+
|
5 |
+
|
6 |
+
async def calculate_image_tokens(width, height, detail):
    """Estimate the vision token cost of an image of the given pixel size.

    Mirrors OpenAI's published algorithm: "low" detail is a flat 85 tokens;
    otherwise the image is (down)scaled to fit a 2048x2048 box, then shrunk so
    its shortest side is at most 768 px, and billed at 170 tokens per 512 px
    tile plus a flat 85-token base cost.
    """
    if detail == "low":
        return 85

    # Downscale (never upscale) so the longest side fits within 2048 px.
    longest = max(width, height)
    if longest > 2048:
        ratio = 2048 / longest
        width, height = int(width * ratio), int(height * ratio)

    # Downscale (never upscale) so the shortest side is at most 768 px.
    shortest = min(width, height)
    if shortest > 768:
        ratio = 768 / shortest
        width, height = int(width * ratio), int(height * ratio)

    # Each 512 px tile costs 170 tokens, plus the 85-token base.
    tiles = math.ceil(width / 512) * math.ceil(height / 512)
    return tiles * 170 + 85
38 |
+
|
39 |
+
|
40 |
+
async def num_tokens_from_messages(messages, model=''):
    """Approximate the prompt token count of OpenAI-style chat messages.

    Fix: the original crashed with TypeError when a multimodal text part had
    ``text: None`` or when a message value was not a string (e.g. a nested
    dict from tool calls); those values are now counted as empty / skipped.

    :param messages: list of message dicts (``role``/``content``/...); content
        may be a plain string or a list of multimodal parts.
    :param model: model name used to pick the tiktoken encoding; unknown
        models fall back to ``cl100k_base``.
    :return: estimated token count, including per-message framing tokens.
    """
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        encoding = tiktoken.get_encoding("cl100k_base")
    # gpt-3.5-turbo-0301 used 4 framing tokens per message; later models use 3.
    if model == "gpt-3.5-turbo-0301":
        tokens_per_message = 4
    else:
        tokens_per_message = 3
    num_tokens = 0
    for message in messages:
        num_tokens += tokens_per_message
        for value in message.values():
            if isinstance(value, list):
                # Multimodal content: only text parts are counted here; image
                # parts are priced separately (see calculate_image_tokens).
                for item in value:
                    if item.get("type") == "text":
                        num_tokens += len(encoding.encode(item.get("text") or ""))
            elif isinstance(value, str):
                num_tokens += len(encoding.encode(value))
    # Every reply is primed with <|start|>assistant<|message|>.
    num_tokens += 3
    return num_tokens
|
63 |
+
|
64 |
+
|
65 |
+
async def num_tokens_from_content(content, model=None):
    """Count the tokens in a plain string using *model*'s encoding.

    Unknown model names fall back to the ``cl100k_base`` encoding.
    """
    try:
        enc = tiktoken.encoding_for_model(model)
    except KeyError:
        enc = tiktoken.get_encoding("cl100k_base")
    return len(enc.encode(content))
73 |
+
|
74 |
+
|
75 |
+
async def split_tokens_from_content(content, max_tokens, model=None):
    """Truncate *content* to at most *max_tokens* tokens.

    :return: ``(text, token_count, finish_reason)`` where ``finish_reason``
        is ``"length"`` when truncation occurred and ``"stop"`` otherwise.
    """
    try:
        enc = tiktoken.encoding_for_model(model)
    except KeyError:
        enc = tiktoken.get_encoding("cl100k_base")
    tokens = enc.encode(content)
    if len(tokens) >= max_tokens:
        # Decode only the allowed prefix back to text.
        return enc.decode(tokens[:max_tokens]), max_tokens, "length"
    return content, len(tokens), "stop"
app.py
ADDED
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import warnings

import uvicorn
from fastapi import FastAPI, HTTPException
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from fastapi.middleware.cors import CORSMiddleware
from fastapi.templating import Jinja2Templates

from utils.configs import enable_gateway, api_prefix

# Silence library warnings globally.
warnings.filterwarnings("ignore")


# Re-format uvicorn's default and access log lines into a compact
# pipe-separated layout.
log_config = uvicorn.config.LOGGING_CONFIG
default_format = "%(asctime)s | %(levelname)s | %(message)s"
access_format = r'%(asctime)s | %(levelname)s | %(client_addr)s: %(request_line)s %(status_code)s'
log_config["formatters"]["default"]["fmt"] = default_format
log_config["formatters"]["access"]["fmt"] = access_format

# The docs endpoints are hidden behind the configured API prefix.
app = FastAPI(
    docs_url=f"/{api_prefix}/docs",  # Swagger UI documentation path
    redoc_url=f"/{api_prefix}/redoc",  # ReDoc documentation path
    openapi_url=f"/{api_prefix}/openapi.json"  # OpenAPI JSON schema path
)

# Allow any origin/method/header — this service is a CORS-open proxy.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

templates = Jinja2Templates(directory="templates")
security_scheme = HTTPBearer()

# NOTE(review): self-import. Route modules imported below do
# `from app import app`; presumably this line keeps the name bound to this
# module's instance when run as a script — confirm it is intentional, as it
# re-executes the module under the name "app" when launched as __main__.
from app import app

# Importing this module registers its routes on `app` as a side effect.
import api.chat2api

if enable_gateway:
    # Gateway mode: each import registers additional route groups on `app`.
    import gateway.share
    import gateway.login
    import gateway.chatgpt
    import gateway.gpts
    import gateway.admin
    import gateway.v1
    import gateway.backend
else:
    @app.api_route("/{path:path}", methods=["GET", "POST", "PUT", "DELETE", "OPTIONS", "HEAD", "PATCH", "TRACE"])
    async def reverse_proxy():
        # Gateway disabled: every non-API path returns 404.
        raise HTTPException(status_code=404, detail="Gateway is disabled")


if __name__ == "__main__":
    uvicorn.run("app:app", host="0.0.0.0", port=5005)
    # uvicorn.run("app:app", host="0.0.0.0", port=5005, ssl_keyfile="key.pem", ssl_certfile="cert.pem")
|
chatgpt/ChatService.py
ADDED
@@ -0,0 +1,530 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import asyncio
|
2 |
+
import json
|
3 |
+
import random
|
4 |
+
import uuid
|
5 |
+
|
6 |
+
from fastapi import HTTPException
|
7 |
+
from starlette.concurrency import run_in_threadpool
|
8 |
+
|
9 |
+
from api.files import get_image_size, get_file_extension, determine_file_use_case
|
10 |
+
from api.models import model_proxy
|
11 |
+
from chatgpt.authorization import get_req_token, verify_token
|
12 |
+
from chatgpt.chatFormat import api_messages_to_chat, stream_response, format_not_stream_response, head_process_response
|
13 |
+
from chatgpt.chatLimit import check_is_limit, handle_request_limit
|
14 |
+
from chatgpt.fp import get_fp
|
15 |
+
from chatgpt.proofofWork import get_config, get_dpl, get_answer_token, get_requirements_token
|
16 |
+
|
17 |
+
from utils.Client import Client
|
18 |
+
from utils.Logger import logger
|
19 |
+
from utils.configs import (
|
20 |
+
chatgpt_base_url_list,
|
21 |
+
ark0se_token_url_list,
|
22 |
+
sentinel_proxy_url_list,
|
23 |
+
history_disabled,
|
24 |
+
pow_difficulty,
|
25 |
+
conversation_only,
|
26 |
+
enable_limit,
|
27 |
+
upload_by_url,
|
28 |
+
auth_key,
|
29 |
+
turnstile_solver_url,
|
30 |
+
oai_language,
|
31 |
+
)
|
32 |
+
|
33 |
+
|
34 |
+
class ChatService:
    """Drives one upstream ChatGPT web conversation on behalf of an API call.

    Lifecycle: construct with the caller's token, then ``set_dynamic_data``
    (resolves tokens/fingerprint/clients), ``get_chat_requirements``
    (sentinel/PoW/arkose/turnstile handshake), ``prepare_send_conversation``,
    ``send_conversation``, and finally ``close_client``.
    """

    def __init__(self, origin_token=None):
        # self.user_agent = random.choice(user_agents_list) if user_agents_list else "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
        self.req_token = get_req_token(origin_token)
        # Placeholder sentinel chat token; replaced by get_chat_requirements().
        self.chat_token = "gAAAAAB"
        self.s = None   # main HTTP client
        self.ss = None  # sentinel HTTP client (may alias self.s)
        self.ws = None  # websocket client, if any

    async def set_dynamic_data(self, data):
        """Resolve tokens, fingerprint, clients and request fields from *data*.

        Raises HTTPException(429) when the per-token rate limit is exceeded.
        """
        if self.req_token:
            # Token may be "access_token,account_id".
            req_len = len(self.req_token.split(","))
            if req_len == 1:
                self.access_token = await verify_token(self.req_token)
                self.account_id = None
            else:
                self.access_token = await verify_token(self.req_token.split(",")[0])
                self.account_id = self.req_token.split(",")[1]
        else:
            logger.info("Request token is empty, use no-auth 3.5")
            self.access_token = None
            self.account_id = None

        # Browser fingerprint for this token; proxy/impersonation are popped
        # out so the remainder can be merged into the request headers.
        self.fp = get_fp(self.req_token).copy()
        self.proxy_url = self.fp.pop("proxy_url", None)
        self.impersonate = self.fp.pop("impersonate", "safari15_3")
        self.user_agent = self.fp.get("user-agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 Edg/130.0.0.0")
        logger.info(f"Request token: {self.req_token}")
        logger.info(f"Request proxy: {self.proxy_url}")
        logger.info(f"Request UA: {self.user_agent}")
        logger.info(f"Request impersonate: {self.impersonate}")

        self.data = data
        await self.set_model()
        if enable_limit and self.req_token:
            limit_response = await handle_request_limit(self.req_token, self.req_model)
            if limit_response:
                raise HTTPException(status_code=429, detail=limit_response)

        self.account_id = self.data.get('Chatgpt-Account-Id', self.account_id)
        self.parent_message_id = self.data.get('parent_message_id')
        self.conversation_id = self.data.get('conversation_id')
        self.history_disabled = self.data.get('history_disabled', history_disabled)

        self.api_messages = self.data.get("messages", [])
        self.prompt_tokens = 0
        # Non-int max_tokens falls back to "effectively unlimited" (2**31-1).
        self.max_tokens = self.data.get("max_tokens", 2147483647)
        if not isinstance(self.max_tokens, int):
            self.max_tokens = 2147483647

        # self.proxy_url = random.choice(proxy_url_list) if proxy_url_list else None

        self.host_url = random.choice(chatgpt_base_url_list) if chatgpt_base_url_list else "https://chatgpt.com"
        self.ark0se_token_url = random.choice(ark0se_token_url_list) if ark0se_token_url_list else None

        self.s = Client(proxy=self.proxy_url, impersonate=self.impersonate)
        # Sentinel requests may go through a dedicated proxy pool.
        if sentinel_proxy_url_list:
            self.ss = Client(proxy=random.choice(sentinel_proxy_url_list), impersonate=self.impersonate)
        else:
            self.ss = self.s

        self.persona = None
        self.ark0se_token = None
        self.proof_token = None
        self.turnstile_token = None

        self.chat_headers = None
        self.chat_request = None

        # Baseline browser-like headers; fingerprint entries override/extend.
        self.base_headers = {
            'accept': '*/*',
            'accept-encoding': 'gzip, deflate, br, zstd',
            'accept-language': 'en-US,en;q=0.9',
            'content-type': 'application/json',
            'oai-language': oai_language,
            'origin': self.host_url,
            'priority': 'u=1, i',
            'referer': f'{self.host_url}/',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin'
        }
        self.base_headers.update(self.fp)

        # Authenticated requests use /backend-api; anonymous use /backend-anon.
        if self.access_token:
            self.base_url = self.host_url + "/backend-api"
            self.base_headers['authorization'] = f'Bearer {self.access_token}'
            if self.account_id:
                self.base_headers['chatgpt-account-id'] = self.account_id
        else:
            self.base_url = self.host_url + "/backend-anon"

        if auth_key:
            self.base_headers['authkey'] = auth_key

        await get_dpl(self)

    async def set_model(self):
        """Derive origin/response/upstream model names and optional gizmo id."""
        self.origin_model = self.data.get("model", "gpt-3.5-turbo-0125")
        self.resp_model = model_proxy.get(self.origin_model, self.origin_model)
        # Model names like "...gizmo-g-XXXX" select a custom GPT.
        if "gizmo" in self.origin_model or "g-" in self.origin_model:
            self.gizmo_id = "g-" + self.origin_model.split("g-")[-1]
        else:
            self.gizmo_id = None

        # Substring matching, ordered most-specific first.
        if "o1-preview" in self.origin_model:
            self.req_model = "o1-preview"
        elif "o1-pro" in self.origin_model:
            self.req_model = "o1-pro"
        elif "o1-mini" in self.origin_model:
            self.req_model = "o1-mini"
        elif "o1" in self.origin_model:
            self.req_model = "o1"
        elif "gpt-4.5o" in self.origin_model:
            self.req_model = "gpt-4.5o"
        elif "gpt-4o-canmore" in self.origin_model:
            self.req_model = "gpt-4o-canmore"
        elif "gpt-4o-mini" in self.origin_model:
            self.req_model = "gpt-4o-mini"
        elif "gpt-4o" in self.origin_model:
            self.req_model = "gpt-4o"
        elif "gpt-4-mobile" in self.origin_model:
            self.req_model = "gpt-4-mobile"
        elif "gpt-4" in self.origin_model:
            self.req_model = "gpt-4"
        elif "gpt-3.5" in self.origin_model:
            self.req_model = "text-davinci-002-render-sha"
        elif "auto" in self.origin_model:
            self.req_model = "auto"
        else:
            self.req_model = "gpt-4o"

    async def get_chat_requirements(self):
        """Run the sentinel handshake: persona check, turnstile, arkose, PoW.

        Returns the sentinel chat token, or None in conversation-only mode.
        Raises HTTPException on any upstream refusal.
        """
        if conversation_only:
            return None
        url = f'{self.base_url}/sentinel/chat-requirements'
        headers = self.base_headers.copy()
        try:
            config = get_config(self.user_agent)
            p = get_requirements_token(config)
            data = {'p': p}
            r = await self.ss.post(url, headers=headers, json=data, timeout=5)
            if r.status_code == 200:
                resp = r.json()

                self.persona = resp.get("persona")
                if self.persona != "chatgpt-paid":
                    # Free accounts cannot use gpt-4 / o1-preview.
                    if self.req_model == "gpt-4" or self.req_model == "o1-preview":
                        logger.error(f"Model {self.resp_model} not support for {self.persona}")
                        raise HTTPException(
                            status_code=404,
                            detail={
                                "message": f"The model `{self.origin_model}` does not exist or you do not have access to it.",
                                "type": "invalid_request_error",
                                "param": None,
                                "code": "model_not_found",
                            },
                        )

                # Turnstile is best-effort: failure to solve is logged, not fatal.
                turnstile = resp.get('turnstile', {})
                turnstile_required = turnstile.get('required')
                if turnstile_required:
                    turnstile_dx = turnstile.get("dx")
                    try:
                        if turnstile_solver_url:
                            res = await self.s.post(
                                turnstile_solver_url, json={"url": "https://chatgpt.com", "p": p, "dx": turnstile_dx}
                            )
                            self.turnstile_token = res.json().get("t")
                    except Exception as e:
                        logger.info(f"Turnstile ignored: {e}")
                        # raise HTTPException(status_code=403, detail="Turnstile required")

                # Arkose challenge, solved via an external token service.
                # ("ark"+"ose" spelled split, presumably to dodge naive scanners.)
                ark0se = resp.get('ark' + 'ose', {})
                ark0se_required = ark0se.get('required')
                if ark0se_required:
                    if self.persona == "chatgpt-freeaccount":
                        ark0se_method = "chat35"
                    else:
                        ark0se_method = "chat4"
                    if not self.ark0se_token_url:
                        raise HTTPException(status_code=403, detail="Ark0se service required")
                    ark0se_dx = ark0se.get("dx")
                    ark0se_client = Client(impersonate=self.impersonate)
                    try:
                        r2 = await ark0se_client.post(
                            url=self.ark0se_token_url, json={"blob": ark0se_dx, "method": ark0se_method}, timeout=15
                        )
                        r2esp = r2.json()
                        logger.info(f"ark0se_token: {r2esp}")
                        if r2esp.get('solved', True):
                            self.ark0se_token = r2esp.get('token')
                        else:
                            raise HTTPException(status_code=403, detail="Failed to get Ark0se token")
                    except Exception:
                        raise HTTPException(status_code=403, detail="Failed to get Ark0se token")
                    finally:
                        await ark0se_client.close()

                # Proof-of-work: solved locally in a thread pool.
                proofofwork = resp.get('proofofwork', {})
                proofofwork_required = proofofwork.get('required')
                if proofofwork_required:
                    proofofwork_diff = proofofwork.get("difficulty")
                    # NOTE(review): difficulty looks like a hex-string compare
                    # (lexicographically smaller = harder) — confirm semantics.
                    if proofofwork_diff <= pow_difficulty:
                        raise HTTPException(status_code=403, detail=f"Proof of work difficulty too high: {proofofwork_diff}")
                    proofofwork_seed = proofofwork.get("seed")
                    self.proof_token, solved = await run_in_threadpool(
                        get_answer_token, proofofwork_seed, proofofwork_diff, config
                    )
                    if not solved:
                        raise HTTPException(status_code=403, detail="Failed to solve proof of work")

                self.chat_token = resp.get('token')
                if not self.chat_token:
                    raise HTTPException(status_code=403, detail=f"Failed to get chat token: {r.text}")
                return self.chat_token
            else:
                # Non-200: surface the upstream detail, special-casing CF
                # challenge pages and rate limits.
                if "application/json" == r.headers.get("Content-Type", ""):
                    detail = r.json().get("detail", r.json())
                else:
                    detail = r.text
                if "cf_chl_opt" in detail:
                    raise HTTPException(status_code=r.status_code, detail="cf_chl_opt")
                if r.status_code == 429:
                    raise HTTPException(status_code=r.status_code, detail="rate-limit")
                raise HTTPException(status_code=r.status_code, detail=detail)
        except HTTPException as e:
            raise HTTPException(status_code=e.status_code, detail=e.detail)
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))

    async def prepare_send_conversation(self):
        """Build the upstream /conversation headers and request payload."""
        try:
            chat_messages, self.prompt_tokens = await api_messages_to_chat(self, self.api_messages, upload_by_url)
        except Exception as e:
            logger.error(f"Failed to format messages: {str(e)}")
            raise HTTPException(status_code=400, detail="Failed to format messages.")
        self.chat_headers = self.base_headers.copy()
        self.chat_headers.update(
            {
                'accept': 'text/event-stream',
                'openai-sentinel-chat-requirements-token': self.chat_token,
                'openai-sentinel-proof-token': self.proof_token,
            }
        )
        if self.ark0se_token:
            self.chat_headers['openai-sentinel-ark' + 'ose-token'] = self.ark0se_token

        if self.turnstile_token:
            self.chat_headers['openai-sentinel-turnstile-token'] = self.turnstile_token

        # In conversation-only mode no sentinel handshake was done; strip the
        # sentinel headers entirely.
        if conversation_only:
            self.chat_headers.pop('openai-sentinel-chat-requirements-token', None)
            self.chat_headers.pop('openai-sentinel-proof-token', None)
            self.chat_headers.pop('openai-sentinel-ark' + 'ose-token', None)
            self.chat_headers.pop('openai-sentinel-turnstile-token', None)

        if self.gizmo_id:
            conversation_mode = {"kind": "gizmo_interaction", "gizmo_id": self.gizmo_id}
            logger.info(f"Gizmo id: {self.gizmo_id}")
        else:
            conversation_mode = {"kind": "primary_assistant"}

        logger.info(f"Model mapping: {self.origin_model} -> {self.req_model}")
        self.chat_request = {
            "action": "next",
            # Randomized client telemetry to look like a real browser session.
            "client_contextual_info": {
                "is_dark_mode": False,
                "time_since_loaded": random.randint(50, 500),
                "page_height": random.randint(500, 1000),
                "page_width": random.randint(1000, 2000),
                "pixel_ratio": 1.5,
                "screen_height": random.randint(800, 1200),
                "screen_width": random.randint(1200, 2200),
            },
            "conversation_mode": conversation_mode,
            "conversation_origin": None,
            "force_paragen": False,
            "force_paragen_model_slug": "",
            "force_rate_limit": False,
            "force_use_sse": True,
            "history_and_training_disabled": self.history_disabled,
            "messages": chat_messages,
            "model": self.req_model,
            "paragen_cot_summary_display_override": "allow",
            "paragen_stream_type_override": None,
            "parent_message_id": self.parent_message_id if self.parent_message_id else f"{uuid.uuid4()}",
            "reset_rate_limits": False,
            "suggestions": [],
            "supported_encodings": [],
            "system_hints": [],
            "timezone": "America/Los_Angeles",
            "timezone_offset_min": -480,
            "variant_purpose": "comparison_implicit",
            "websocket_request_id": f"{uuid.uuid4()}",
        }
        if self.conversation_id:
            self.chat_request['conversation_id'] = self.conversation_id
        return self.chat_request

    async def send_conversation(self):
        """POST the conversation and return a streaming or aggregated response.

        Returns an async generator (stream=True) or a completed response dict;
        raises HTTPException with the upstream status/detail on failure.
        """
        try:
            url = f'{self.base_url}/conversation'
            stream = self.data.get("stream", False)
            r = await self.s.post_stream(url, headers=self.chat_headers, json=self.chat_request, timeout=10, stream=True)
            if r.status_code != 200:
                rtext = await r.atext()
                if "application/json" == r.headers.get("Content-Type", ""):
                    detail = json.loads(rtext).get("detail", json.loads(rtext))
                    if r.status_code == 429:
                        # Record the rate-limit window for this token/model.
                        check_is_limit(detail, token=self.req_token, model=self.req_model)
                else:
                    if "cf_chl_opt" in rtext:
                        # logger.error(f"Failed to send conversation: cf_chl_opt")
                        raise HTTPException(status_code=r.status_code, detail="cf_chl_opt")
                    if r.status_code == 429:
                        # logger.error(f"Failed to send conversation: rate-limit")
                        raise HTTPException(status_code=r.status_code, detail="rate-limit")
                    detail = r.text[:100]
                # logger.error(f"Failed to send conversation: {detail}")
                raise HTTPException(status_code=r.status_code, detail=detail)

            content_type = r.headers.get("Content-Type", "")
            if "text/event-stream" in content_type:
                # Peek at the head of the SSE stream to verify it started.
                res, start = await head_process_response(r.aiter_lines())
                if not start:
                    raise HTTPException(
                        status_code=403,
                        detail="Our systems have detected unusual activity coming from your system. Please try again later.",
                    )
                if stream:
                    return stream_response(self, res, self.resp_model, self.max_tokens)
                else:
                    return await format_not_stream_response(
                        stream_response(self, res, self.resp_model, self.max_tokens),
                        self.prompt_tokens,
                        self.max_tokens,
                        self.resp_model,
                    )
            elif "application/json" in content_type:
                rtext = await r.atext()
                resp = json.loads(rtext)
                raise HTTPException(status_code=r.status_code, detail=resp)
            else:
                rtext = await r.atext()
                raise HTTPException(status_code=r.status_code, detail=rtext)
        except HTTPException as e:
            raise HTTPException(status_code=e.status_code, detail=e.detail)
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))

    async def get_download_url(self, file_id):
        """Return the download URL for an uploaded file, or "" on failure."""
        url = f"{self.base_url}/files/{file_id}/download"
        headers = self.base_headers.copy()
        try:
            r = await self.s.get(url, headers=headers, timeout=10)
            if r.status_code == 200:
                download_url = r.json().get('download_url')
                return download_url
            else:
                raise HTTPException(status_code=r.status_code, detail=r.text)
        except Exception as e:
            logger.error(f"Failed to get download url: {e}")
            return ""

    async def get_download_url_from_upload(self, file_id):
        """Confirm an upload upstream and return its download URL ("" on failure)."""
        url = f"{self.base_url}/files/{file_id}/uploaded"
        headers = self.base_headers.copy()
        try:
            r = await self.s.post(url, headers=headers, json={}, timeout=10)
            if r.status_code == 200:
                download_url = r.json().get('download_url')
                return download_url
            else:
                raise HTTPException(status_code=r.status_code, detail=r.text)
        except Exception as e:
            logger.error(f"Failed to get download url from upload: {e}")
            return ""

    async def get_upload_url(self, file_name, file_size, use_case="multimodal"):
        """Register a file upstream; return (file_id, upload_url) or ("", "")."""
        url = f'{self.base_url}/files'
        headers = self.base_headers.copy()
        try:
            r = await self.s.post(
                url,
                headers=headers,
                json={"file_name": file_name, "file_size": file_size, "reset_rate_limits": False, "timezone_offset_min": -480, "use_case": use_case},
                timeout=5,
            )
            if r.status_code == 200:
                res = r.json()
                file_id = res.get('file_id')
                upload_url = res.get('upload_url')
                logger.info(f"file_id: {file_id}, upload_url: {upload_url}")
                return file_id, upload_url
            else:
                raise HTTPException(status_code=r.status_code, detail=r.text)
        except Exception as e:
            logger.error(f"Failed to get upload url: {e}")
            return "", ""

    async def upload(self, upload_url, file_content, mime_type):
        """PUT raw bytes to the (Azure blob) upload URL; True on HTTP 201."""
        headers = self.base_headers.copy()
        headers.update(
            {
                'accept': 'application/json, text/plain, */*',
                'content-type': mime_type,
                'x-ms-blob-type': 'BlockBlob',
                'x-ms-version': '2020-04-08',
            }
        )
        # Blob storage must not receive the ChatGPT auth/device headers.
        headers.pop('authorization', None)
        headers.pop('oai-device-id', None)
        headers.pop('oai-language', None)
        try:
            r = await self.s.put(upload_url, headers=headers, data=file_content, timeout=60)
            if r.status_code == 201:
                return True
            else:
                raise HTTPException(status_code=r.status_code, detail=r.text)
        except Exception as e:
            logger.error(f"Failed to upload file: {e}")
            return False

    async def upload_file(self, file_content, mime_type):
        """Full upload pipeline: register, PUT, confirm; return file metadata.

        Returns None when input is empty or any stage fails.
        """
        if not file_content or not mime_type:
            return None

        width, height = None, None
        if mime_type.startswith("image/"):
            try:
                width, height = await get_image_size(file_content)
            except Exception as e:
                # Undecodable "image" content is re-uploaded as plain text.
                logger.error(f"Error image mime_type, change to text/plain: {e}")
                mime_type = 'text/plain'
        file_size = len(file_content)
        file_extension = await get_file_extension(mime_type)
        file_name = f"{uuid.uuid4()}{file_extension}"
        use_case = await determine_file_use_case(mime_type)

        file_id, upload_url = await self.get_upload_url(file_name, file_size, use_case)
        if file_id and upload_url:
            if await self.upload(upload_url, file_content, mime_type):
                download_url = await self.get_download_url_from_upload(file_id)
                if download_url:
                    file_meta = {
                        "file_id": file_id,
                        "file_name": file_name,
                        "size_bytes": file_size,
                        "mime_type": mime_type,
                        "width": width,
                        "height": height,
                        "use_case": use_case,
                    }
                    logger.info(f"File_meta: {file_meta}")
                    return file_meta

    async def check_upload(self, file_id):
        """Poll (up to ~30 s) until the file's retrieval index is built."""
        url = f'{self.base_url}/files/{file_id}'
        headers = self.base_headers.copy()
        try:
            for i in range(30):
                r = await self.s.get(url, headers=headers, timeout=5)
                if r.status_code == 200:
                    res = r.json()
                    retrieval_index_status = res.get('retrieval_index_status', '')
                    if retrieval_index_status == "success":
                        break
                await asyncio.sleep(1)
            return True
        except HTTPException:
            return False

    async def get_response_file_url(self, conversation_id, message_id, sandbox_path):
        """Return the download URL for a code-interpreter output file, or None."""
        try:
            url = f"{self.base_url}/conversation/{conversation_id}/interpreter/download"
            params = {"message_id": message_id, "sandbox_path": sandbox_path}
            headers = self.base_headers.copy()
            r = await self.s.get(url, headers=headers, params=params, timeout=10)
            if r.status_code == 200:
                return r.json().get("download_url")
            else:
                return None
        except Exception:
            logger.info("Failed to get response file url")
            return None

    async def close_client(self):
        """Close and drop every HTTP/websocket client owned by this service."""
        if self.s:
            await self.s.close()
            del self.s
        if self.ss:
            await self.ss.close()
            del self.ss
        if self.ws:
            await self.ws.close()
            del self.ws
chatgpt/authorization.py
ADDED
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import asyncio
|
2 |
+
import json
|
3 |
+
import random
|
4 |
+
|
5 |
+
from fastapi import HTTPException
|
6 |
+
|
7 |
+
import utils.configs as configs
|
8 |
+
import utils.globals as globals
|
9 |
+
from chatgpt.refreshToken import rt2ac
|
10 |
+
from utils.Logger import logger
|
11 |
+
|
12 |
+
|
13 |
+
def get_req_token(req_token, seed=None):
    """Resolve which upstream token a request should use.

    With ``auto_seed`` enabled: a *seed* pins a random token (persisted to
    SEED_MAP_FILE); a token found in the authorization list is swapped for a
    pooled token (random or round-robin); anything else passes through.
    With ``auto_seed`` disabled, *req_token* itself is treated as a seed and
    must already exist in the seed map.

    Raises HTTPException(401) for an unknown seed in non-auto-seed mode.
    """
    if configs.auto_seed:
        # Tokens known to be bad are excluded from the pool.
        available_token_list = list(set(globals.token_list) - set(globals.error_token_list))
        length = len(available_token_list)
        if seed and length > 0:
            if seed not in globals.seed_map.keys():
                # First sighting of this seed: pin a random token and persist.
                # NOTE(review): this branch returns the caller's original
                # req_token rather than the newly pinned token — confirm that
                # is intentional (subsequent calls will return the pinned one).
                globals.seed_map[seed] = {"token": random.choice(available_token_list), "conversations": []}
                with open(globals.SEED_MAP_FILE, "w") as f:
                    json.dump(globals.seed_map, f, indent=4)
            else:
                req_token = globals.seed_map[seed]["token"]
            return req_token

        if req_token in configs.authorization_list:
            if len(available_token_list) > 0:
                if configs.random_token:
                    req_token = random.choice(available_token_list)
                    return req_token
                else:
                    # Round-robin over the pool via a global counter.
                    globals.count += 1
                    globals.count %= length
                    return available_token_list[globals.count]
            else:
                return None
        else:
            return req_token
    else:
        # auto_seed disabled: the incoming token IS the seed.
        seed = req_token
        if seed not in globals.seed_map.keys():
            raise HTTPException(status_code=401, detail={"error": "Invalid Seed"})
        return globals.seed_map[seed]["token"]
|
44 |
+
|
45 |
+
|
46 |
+
async def verify_token(req_token):
    """Turn an incoming request token into an access token.

    - Empty token: allowed only when no authorization list is configured,
      otherwise raises 401.
    - ``eyJhbGciOi...`` (JWT) or ``fk-...``: already an access token / API
      key, returned as-is.
    - 45-character token: treated as a refresh token and exchanged via
      :func:`rt2ac`; known-bad tokens are rejected with 401.
    - Anything else is passed through unchanged.

    Raises:
        HTTPException: 401 for missing/known-bad tokens, or whatever
        :func:`rt2ac` raises on exchange failure.
    """
    if not req_token:
        if configs.authorization_list:
            logger.error("Unauthorized with empty token.")
            raise HTTPException(status_code=401)
        return None
    if req_token.startswith(("eyJhbGciOi", "fk-")):
        # Already usable as-is (access-token JWT or fk- key).
        return req_token
    if len(req_token) == 45:
        # Refresh token: exchange it for an access token.
        if req_token in globals.error_token_list:
            raise HTTPException(status_code=401, detail="Error RefreshToken")
        # rt2ac raises HTTPException itself on failure; let it propagate
        # instead of re-wrapping (preserves status/detail and traceback).
        return await rt2ac(req_token, force_refresh=False)
    return req_token
|
68 |
+
|
69 |
+
|
70 |
+
async def refresh_all_tokens(force_refresh=False):
    """Refresh the access token of every usable 45-char refresh token.

    Tokens already on the error list are skipped; individual refresh
    failures are ignored so one bad token cannot stop the sweep.
    """
    for refresh_token in set(globals.token_list) - set(globals.error_token_list):
        if len(refresh_token) != 45:
            continue
        try:
            # Small delay to avoid hammering the upstream refresh endpoint.
            await asyncio.sleep(0.5)
            await rt2ac(refresh_token, force_refresh=force_refresh)
        except HTTPException:
            pass
    logger.info("All tokens refreshed.")
|
chatgpt/chatFormat.py
ADDED
@@ -0,0 +1,436 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import asyncio
|
2 |
+
import json
|
3 |
+
import random
|
4 |
+
import re
|
5 |
+
import string
|
6 |
+
import time
|
7 |
+
import uuid
|
8 |
+
|
9 |
+
import pybase64
|
10 |
+
import websockets
|
11 |
+
from fastapi import HTTPException
|
12 |
+
|
13 |
+
from api.files import get_file_content
|
14 |
+
from api.models import model_system_fingerprint
|
15 |
+
from api.tokens import split_tokens_from_content, calculate_image_tokens, num_tokens_from_messages
|
16 |
+
from utils.Logger import logger
|
17 |
+
|
18 |
+
moderation_message = "I'm sorry, I cannot provide or engage in any content related to pornography, violence, or any unethical material. If you have any other questions or need assistance, please feel free to let me know. I'll do my best to provide support and assistance."
|
19 |
+
|
20 |
+
|
21 |
+
async def format_not_stream_response(response, prompt_tokens, max_tokens, model):
    """Drain an OpenAI-style SSE chunk stream and build a single
    non-streaming ``chat.completion`` response dict.

    ``response`` is an async iterator of already-formatted SSE lines
    (``data: {...}`` / ``data: [DONE]``); delta contents are concatenated,
    then truncated to ``max_tokens`` by ``split_tokens_from_content``.
    Raises HTTPException(403) if no content was collected.
    """
    chat_id = f"chatcmpl-{''.join(random.choice(string.ascii_letters + string.digits) for _ in range(29))}"
    system_fingerprint_list = model_system_fingerprint.get(model, None)
    system_fingerprint = random.choice(system_fingerprint_list) if system_fingerprint_list else None
    created_time = int(time.time())
    all_text = ""
    async for chunk in response:
        try:
            if chunk.startswith("data: [DONE]"):
                break
            elif not chunk.startswith("data: "):
                # Ignore anything that is not an SSE data line.
                continue
            else:
                chunk = json.loads(chunk[6:])
                if not chunk["choices"][0].get("delta"):
                    continue
                # Accumulate the streamed delta text.
                all_text += chunk["choices"][0]["delta"]["content"]
        except Exception as e:
            # Malformed chunks are logged and skipped, not fatal.
            logger.error(f"Error: {chunk}, error: {str(e)}")
            continue
    # Re-tokenize the full text so usage/finish_reason honor max_tokens.
    content, completion_tokens, finish_reason = await split_tokens_from_content(all_text, max_tokens, model)
    message = {
        "role": "assistant",
        "content": content,
    }
    usage = {
        "prompt_tokens": prompt_tokens,
        "completion_tokens": completion_tokens,
        "total_tokens": prompt_tokens + completion_tokens
    }
    if not message.get("content"):
        raise HTTPException(status_code=403, detail="No content in the message.")

    data = {
        "id": chat_id,
        "object": "chat.completion",
        "created": created_time,
        "model": model,
        "choices": [
            {
                "index": 0,
                "message": message,
                "logprobs": None,
                "finish_reason": finish_reason
            }
        ],
        "usage": usage
    }
    if system_fingerprint:
        data["system_fingerprint"] = system_fingerprint
    return data
|
72 |
+
|
73 |
+
|
74 |
+
async def wss_stream_response(websocket, conversation_id):
    """Yield decoded SSE body bytes for one conversation from a shared
    websocket stream.

    Messages without a ``sequenceId`` or belonging to other conversations
    are skipped; every 80th sequence is acknowledged back to the server.
    Stops on receive timeout (10 s) or when the socket closes; a normal
    close (code 1000) also yields a final ``data: [DONE]`` marker.
    """
    while not websocket.closed:
        try:
            message = await asyncio.wait_for(websocket.recv(), timeout=10)
            if message:
                resultObj = json.loads(message)
                sequenceId = resultObj.get("sequenceId", None)
                if not sequenceId:
                    continue
                data = resultObj.get("data", {})
                # Multiplexed stream: only pass through our conversation.
                if conversation_id != data.get("conversation_id", ""):
                    continue
                sequenceId = resultObj.get('sequenceId')
                if sequenceId and sequenceId % 80 == 0:
                    # Periodic ack so the server keeps streaming.
                    await websocket.send(
                        json.dumps(
                            {"type": "sequenceAck", "sequenceId": sequenceId}
                        )
                    )
                # Body is base64-encoded SSE payload bytes.
                decoded_bytes = pybase64.b64decode(data.get("body", None))
                yield decoded_bytes
            else:
                print("No message received within the specified time.")
        except asyncio.TimeoutError:
            logger.error("Timeout! No message received within the specified time.")
            break
        except websockets.ConnectionClosed as e:
            if e.code == 1000:
                # Normal close: signal end-of-stream to the consumer.
                logger.error("WebSocket closed normally with code 1000 (OK)")
                yield b"data: [DONE]\n\n"
            else:
                logger.error(f"WebSocket closed with error code {e.code}")
                # loop re-checks websocket.closed and exits
        except Exception as e:
            logger.error(f"Error: {str(e)}")
            continue
|
109 |
+
|
110 |
+
|
111 |
+
async def head_process_response(response):
    """Peek at the leading SSE events of *response*.

    Consumes events until the first non-user/non-system message is seen.
    Returns ``(response, True)`` when that message is ``in_progress``
    (a real completion is coming), ``(response, False)`` when an error
    payload appears or the stream ends first.
    """
    async for raw in response:
        event = raw.decode("utf-8")
        if not event.startswith("data: {"):
            continue
        payload = json.loads(event[6:])
        message = payload.get("message", {})
        if not message and "error" in payload:
            # Upstream reported an error before any assistant output.
            return response, False
        author_role = message.get("author", {}).get("role")
        if author_role in ("user", "system"):
            # Echoed prompt/system messages: keep looking.
            continue
        if message.get("status") == "in_progress":
            return response, True
    return response, False
|
127 |
+
|
128 |
+
|
129 |
+
async def stream_response(service, response, model, max_tokens):
    """Translate ChatGPT-backend SSE events into OpenAI
    ``chat.completion.chunk`` SSE events.

    Stateful re-streamer: tracks the previous role/content-type/message id
    so that tool output, code blocks, execution output, citations, DALL·E
    images and sandbox file links are rendered as markdown deltas.  Emits
    an initial empty-role chunk, then one chunk per upstream event, and a
    final ``data: [DONE]``.  ``completion_tokens`` is approximated as one
    token per emitted chunk and capped at ``max_tokens``.
    """
    chat_id = f"chatcmpl-{''.join(random.choice(string.ascii_letters + string.digits) for _ in range(29))}"
    system_fingerprint_list = model_system_fingerprint.get(model, None)
    system_fingerprint = random.choice(system_fingerprint_list) if system_fingerprint_list else None
    created_time = int(time.time())
    completion_tokens = 0          # chunks emitted so far (token proxy)
    len_last_content = 0           # chars of the current part already sent
    len_last_citation = 0          # citations already rendered
    last_message_id = None
    last_role = None
    last_content_type = None
    model_slug = None              # actual upstream model, for logging
    end = False                    # set when a terminal chunk was emitted

    # Template chunk reused (mutated) for every delta.
    chunk_new_data = {
        "id": chat_id,
        "object": "chat.completion.chunk",
        "created": created_time,
        "model": model,
        "choices": [
            {
                "index": 0,
                "delta": {"role": "assistant", "content": ""},
                "logprobs": None,
                "finish_reason": None
            }
        ]
    }
    if system_fingerprint:
        chunk_new_data["system_fingerprint"] = system_fingerprint
    yield f"data: {json.dumps(chunk_new_data)}\n\n"

    async for chunk in response:
        chunk = chunk.decode("utf-8")
        if end:
            # Previous iteration emitted the terminal chunk; close stream.
            logger.info(f"Response Model: {model_slug}")
            yield "data: [DONE]\n\n"
            break
        try:
            if chunk.startswith("data: {"):
                chunk_old_data = json.loads(chunk[6:])
                finish_reason = None
                message = chunk_old_data.get("message", {})
                conversation_id = chunk_old_data.get("conversation_id")
                role = message.get('author', {}).get('role')
                if role == 'user' or role == 'system':
                    # Echoed prompt/system messages are not forwarded.
                    continue

                status = message.get("status")
                message_id = message.get("id")
                content = message.get("content", {})
                recipient = message.get("recipient", "")
                meta_data = message.get("metadata", {})
                initial_text = meta_data.get("initial_text", "")
                model_slug = meta_data.get("model_slug", model_slug)

                if not message and chunk_old_data.get("type") == "moderation":
                    # Moderation block: replace output with the canned notice.
                    delta = {"role": "assistant", "content": moderation_message}
                    finish_reason = "stop"
                    end = True
                elif status == "in_progress":
                    outer_content_type = content.get("content_type")
                    if outer_content_type == "text":
                        part = content.get("parts", [])[0]
                        if not part:
                            # Empty part: only emit role-transition separators.
                            if role == 'assistant' and last_role != 'assistant':
                                if last_role == None:
                                    new_text = ""
                                else:
                                    new_text = f"\n"
                            elif role == 'tool' and last_role != 'tool':
                                new_text = f">{initial_text}\n"
                            else:
                                new_text = ""
                        else:
                            if last_message_id and last_message_id != message_id:
                                # Stale delta from a superseded message.
                                continue
                            citation = message.get("metadata", {}).get("citations", [])
                            if len(citation) > len_last_citation:
                                # New citation: render it as a markdown link.
                                inside_metadata = citation[-1].get("metadata", {})
                                citation_title = inside_metadata.get("title", "")
                                citation_url = inside_metadata.get("url", "")
                                new_text = f' **[[""]]({citation_url} "{citation_title}")** '
                                len_last_citation = len(citation)
                            else:
                                # Incremental text: send only the unseen suffix.
                                if role == 'assistant' and last_role != 'assistant':
                                    if recipient == 'dalle.text2im':
                                        # Open a fenced block for the DALL·E prompt.
                                        new_text = f"\n```{recipient}\n{part[len_last_content:]}"
                                    elif last_role == None:
                                        new_text = part[len_last_content:]
                                    else:
                                        new_text = f"\n\n{part[len_last_content:]}"
                                elif role == 'tool' and last_role != 'tool':
                                    new_text = f">{initial_text}\n{part[len_last_content:]}"
                                elif role == 'tool':
                                    new_text = part[len_last_content:].replace("\n\n", "\n")
                                else:
                                    new_text = part[len_last_content:]
                                len_last_content = len(part)
                    else:
                        # Non-text content: code or execution output, fenced.
                        text = content.get("text", "")
                        if outer_content_type == "code" and last_content_type != "code":
                            language = content.get("language", "")
                            if not language or language == "unknown":
                                language = recipient
                            new_text = "\n```" + language + "\n" + text[len_last_content:]
                        elif outer_content_type == "execution_output" and last_content_type != "execution_output":
                            new_text = "\n```" + "Output" + "\n" + text[len_last_content:]
                        else:
                            new_text = text[len_last_content:]
                        len_last_content = len(text)
                        # Close the previous fence on content-type change.
                        if last_content_type == "code" and outer_content_type != "code":
                            new_text = "\n```\n" + new_text
                        elif last_content_type == "execution_output" and outer_content_type != "execution_output":
                            new_text = "\n```\n" + new_text

                    delta = {"content": new_text}
                    last_content_type = outer_content_type
                    if completion_tokens >= max_tokens:
                        # Cap reached: cut the stream with finish_reason=length.
                        delta = {}
                        finish_reason = "length"
                        end = True
                elif status == "finished_successfully":
                    if content.get("content_type") == "multimodal_text":
                        parts = content.get("parts", [])
                        delta = {}
                        for part in parts:
                            if isinstance(part, str):
                                continue
                            inner_content_type = part.get('content_type')
                            if inner_content_type == "image_asset_pointer":
                                last_content_type = "image_asset_pointer"
                                file_id = part.get('asset_pointer').replace('file-service://', '')
                                logger.debug(f"file_id: {file_id}")
                                image_download_url = await service.get_download_url(file_id)
                                logger.debug(f"image_download_url: {image_download_url}")
                                if image_download_url:
                                    # NOTE(review): this literal looks truncated by
                                    # the diff rendering — image_download_url is
                                    # fetched but unused; likely a markdown image
                                    # link belongs here.  TODO confirm upstream.
                                    delta = {"content": f"\n```\n\n"}
                                else:
                                    delta = {"content": f"\n```\nFailed to load the image.\n"}
                    elif message.get("end_turn"):
                        part = content.get("parts", [])[0]
                        new_text = part[len_last_content:]
                        if not new_text:
                            # Final text already streamed; look for sandbox
                            # file links to resolve into download URLs.
                            matches = re.findall(r'\(sandbox:(.*?)\)', part)
                            if matches:
                                file_url_content = ""
                                for i, sandbox_path in enumerate(matches):
                                    file_download_url = await service.get_response_file_url(conversation_id, message_id, sandbox_path)
                                    if file_download_url:
                                        # NOTE(review): literal appears truncated —
                                        # file_download_url is unused here; verify
                                        # against the upstream source.
                                        file_url_content += f"\n```\n\n\n"
                                delta = {"content": file_url_content}
                            else:
                                delta = {}
                        else:
                            delta = {"content": new_text}
                        finish_reason = "stop"
                        end = True
                    else:
                        # Intermediate tool step finished; reset progress counter.
                        len_last_content = 0
                        if meta_data.get("finished_text"):
                            delta = {"content": f"\n{meta_data.get('finished_text')}\n"}
                        else:
                            continue
                else:
                    continue
                last_message_id = message_id
                last_role = role
                if not end and not delta.get("content"):
                    # Keep-alive chunk so the client connection stays open.
                    delta = {"role": "assistant", "content": ""}
                chunk_new_data["choices"][0]["delta"] = delta
                chunk_new_data["choices"][0]["finish_reason"] = finish_reason
                if not service.history_disabled:
                    # Expose upstream ids so the client can continue the chat.
                    chunk_new_data.update({
                        "message_id": message_id,
                        "conversation_id": conversation_id,
                    })
                completion_tokens += 1
                yield f"data: {json.dumps(chunk_new_data)}\n\n"
            elif chunk.startswith("data: [DONE]"):
                logger.info(f"Response Model: {model_slug}")
                yield "data: [DONE]\n\n"
            else:
                continue
        except Exception as e:
            if chunk.startswith("data: "):
                chunk_data = json.loads(chunk[6:])
                if chunk_data.get("error"):
                    # Upstream error payload: log it and terminate the stream.
                    logger.error(f"Error: {chunk_data.get('error')}")
                    yield "data: [DONE]\n\n"
                    break
            logger.error(f"Error: {chunk}, details: {str(e)}")
            continue
|
322 |
+
|
323 |
+
|
324 |
+
# Matches a leading URL: scheme (http:, ftp:, ...), www.-prefixed host, or
# bare domain followed by a path; trailing punctuation is excluded.
_URL_PATTERN = re.compile(
    r'(?i)\b((?:[a-z][\w-]+:(?:/{1,3}|[a-z0-9%])|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)'
    r'(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+'
    r'(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))')


def get_url_from_content(content):
    """Extract one leading URL from *content*.

    Args:
        content: a user message; only strings starting with ``'http'``
            are inspected, anything else is returned untouched.

    Returns:
        ``(url, remainder)`` when the first whitespace-separated token is
        a URL (remainder has the URL removed and is stripped), otherwise
        ``(None, content)``.
    """
    if isinstance(content, str) and content.startswith('http'):
        # Explicit match check instead of relying on None[0] raising
        # inside a broad except (previous behavior, same results).
        match = _URL_PATTERN.match(content.split(' ')[0])
        if match:
            url = match[0]
            return url, content.replace(url, '').strip()
    return None, content
|
335 |
+
|
336 |
+
|
337 |
+
def format_messages_with_url(content):
    """Split leading URLs out of a plain-text message.

    Repeatedly strips URLs off the front of *content*.  If none are found
    the original string is returned; otherwise an OpenAI multimodal
    content list is built: one text part followed by one ``image_url``
    part per extracted URL.
    """
    extracted_urls = []
    while True:
        found_url, content = get_url_from_content(content)
        if not found_url:
            break
        extracted_urls.append(found_url)
        logger.info(f"Found a file_url from messages: {found_url}")

    if not extracted_urls:
        return content

    multimodal_parts = [
        {
            "type": "text",
            "text": content
        }
    ]
    multimodal_parts.extend(
        {
            "type": "image_url",
            "image_url": {
                "url": extracted
            }
        }
        for extracted in extracted_urls
    )
    return multimodal_parts
|
362 |
+
|
363 |
+
|
364 |
+
async def api_messages_to_chat(service, api_messages, upload_by_url=False):
    """Convert OpenAI-style API messages into ChatGPT backend messages.

    String contents become plain text parts; list contents (multimodal)
    are walked, and every ``image_url`` item is downloaded and uploaded
    to the backend via *service*, producing asset pointers/attachments.
    With ``upload_by_url`` enabled, bare URLs at the start of a string
    message are first converted into multimodal form.

    Returns:
        ``(chat_messages, prompt_tokens)`` where prompt_tokens is the text
        token count plus an estimate for uploaded files/images.
    """
    file_tokens = 0
    chat_messages = []
    for api_message in api_messages:
        role = api_message.get('role')
        content = api_message.get('content')
        if upload_by_url:
            if isinstance(content, str):
                # May turn the string into a multimodal content list.
                content = format_messages_with_url(content)
        if isinstance(content, list):
            parts = []
            attachments = []
            content_type = "multimodal_text"
            for i in content:
                if i.get("type") == "text":
                    parts.append(i.get("text"))
                elif i.get("type") == "image_url":
                    image_url = i.get("image_url")
                    url = image_url.get("url")
                    detail = image_url.get("detail", "auto")
                    # Fetch the file (URL or data URI) then upload upstream.
                    file_content, mime_type = await get_file_content(url)
                    file_meta = await service.upload_file(file_content, mime_type)
                    if file_meta:
                        file_id = file_meta["file_id"]
                        file_size = file_meta["size_bytes"]
                        file_name = file_meta["file_name"]
                        mime_type = file_meta["mime_type"]
                        use_case = file_meta["use_case"]
                        if mime_type.startswith("image/"):
                            width, height = file_meta["width"], file_meta["height"]
                            # Images are billed by resolution and detail level.
                            file_tokens += await calculate_image_tokens(width, height, detail)
                            parts.append({
                                "content_type": "image_asset_pointer",
                                "asset_pointer": f"file-service://{file_id}",
                                "size_bytes": file_size,
                                "width": width,
                                "height": height
                            })
                            attachments.append({
                                "id": file_id,
                                "size": file_size,
                                "name": file_name,
                                "mime_type": mime_type,
                                "width": width,
                                "height": height
                            })
                        else:
                            # Non-image file: confirm the upload finished
                            # unless it was an ace_upload (no check needed).
                            if not use_case == "ace_upload":
                                await service.check_upload(file_id)
                            # Rough token estimate: 1 token per KB of file.
                            file_tokens += file_size // 1000
                            attachments.append({
                                "id": file_id,
                                "size": file_size,
                                "name": file_name,
                                "mime_type": mime_type,
                            })
            metadata = {
                "attachments": attachments
            }
        else:
            content_type = "text"
            parts = [content]
            metadata = {}
        chat_message = {
            "id": f"{uuid.uuid4()}",
            "author": {"role": role},
            "content": {"content_type": content_type, "parts": parts},
            "metadata": metadata
        }
        chat_messages.append(chat_message)
    text_tokens = await num_tokens_from_messages(api_messages, service.resp_model)
    prompt_tokens = text_tokens + file_tokens
    return chat_messages, prompt_tokens
|
chatgpt/chatLimit.py
ADDED
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import time
|
2 |
+
from datetime import datetime
|
3 |
+
|
4 |
+
from utils.Logger import logger
|
5 |
+
|
6 |
+
limit_details = {}
|
7 |
+
|
8 |
+
|
9 |
+
def check_is_limit(detail, token, model):
    """Record a per-token, per-model rate-limit expiry.

    Called with an upstream error *detail*; when it carries a
    ``clears_in`` duration, the absolute expiry time is stored in the
    module-level ``limit_details`` map and logged.
    """
    if not token or not isinstance(detail, dict):
        return
    clears_in = detail.get('clears_in')
    if not clears_in:
        return
    expires_at = int(time.time()) + clears_in
    limit_details.setdefault(token, {})[model] = expires_at
    logger.info(f"{token[:40]}: Reached {model} limit, will be cleared at {datetime.fromtimestamp(expires_at).replace(microsecond=0)}")
|
14 |
+
|
15 |
+
|
16 |
+
async def handle_request_limit(token, model):
    """Check whether *token* is currently rate-limited for *model*.

    Returns a human-readable limit message while the limit is active,
    otherwise ``None`` (expired entries are removed on the way out).
    """
    try:
        token_limits = limit_details.get(token)
        if not token_limits or model not in token_limits:
            return None
        expiry = token_limits[model]
        if expiry > int(time.time()):
            clear_date = datetime.fromtimestamp(expiry).replace(microsecond=0)
            result = f"Request limit exceeded. You can continue with the default model now, or try again after {clear_date}"
            logger.info(result)
            return result
        # Limit window has passed: forget it.
        del token_limits[model]
        return None
    except KeyError as e:
        logger.error(f"Key error: {e}")
        return None
    except Exception as e:
        logger.error(f"An unexpected error occurred: {e}")
        return None
|
chatgpt/fp.py
ADDED
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
import random
|
3 |
+
import uuid
|
4 |
+
|
5 |
+
import ua_generator
|
6 |
+
from ua_generator.data.version import VersionRange
|
7 |
+
from ua_generator.options import Options
|
8 |
+
|
9 |
+
import utils.globals as globals
|
10 |
+
from utils import configs
|
11 |
+
|
12 |
+
|
13 |
+
def _save_fp_map():
    """Persist the in-memory fingerprint map to its JSON file."""
    with open(globals.FP_FILE, "w", encoding="utf-8") as f:
        json.dump(globals.fp_map, f, indent=4)


def get_fp(req_token):
    """Return the browser fingerprint bound to *req_token*, creating and
    caching one if needed.

    A cached fingerprint is reused after repairing any field that has
    become invalid under the current config (proxy_url, impersonate,
    user-agent); each repair is persisted immediately.  Without a usable
    cache a fresh fingerprint is generated via ua_generator and — when a
    token is given — stored in ``globals.fp_map``.
    """
    fp = globals.fp_map.get(req_token, {})
    if fp and fp.get("user-agent") and fp.get("impersonate"):
        # Re-pick the proxy when the cached one is gone from the config.
        if "proxy_url" in fp.keys() and (fp["proxy_url"] is None or fp["proxy_url"] not in configs.proxy_url_list):
            fp["proxy_url"] = random.choice(configs.proxy_url_list) if configs.proxy_url_list else None
            globals.fp_map[req_token] = fp
            _save_fp_map()
        # Same for the curl impersonation profile...
        if globals.impersonate_list and "impersonate" in fp.keys() and fp["impersonate"] not in globals.impersonate_list:
            fp["impersonate"] = random.choice(globals.impersonate_list)
            globals.fp_map[req_token] = fp
            _save_fp_map()
        # ...and the user-agent string.
        if configs.user_agents_list and "user-agent" in fp.keys() and fp["user-agent"] not in configs.user_agents_list:
            fp["user-agent"] = random.choice(configs.user_agents_list)
            globals.fp_map[req_token] = fp
            _save_fp_map()
        # Normalize header-name keys to lower case.
        return {k.lower(): v for k, v in fp.items()}

    # No usable cached fingerprint: generate a fresh one (Chrome/Edge >= 124).
    options = Options(version_ranges={
        'chrome': VersionRange(min_version=124),
        'edge': VersionRange(min_version=124),
    })
    ua = ua_generator.generate(
        device=configs.device_tuple if configs.device_tuple else ('desktop'),
        browser=configs.browser_tuple if configs.browser_tuple else ('chrome', 'edge', 'firefox', 'safari'),
        platform=configs.platform_tuple if configs.platform_tuple else ('windows', 'macos'),
        options=options
    )
    fp = {
        "user-agent": ua.text if not configs.user_agents_list else random.choice(configs.user_agents_list),
        "impersonate": random.choice(globals.impersonate_list),
        "proxy_url": random.choice(configs.proxy_url_list) if configs.proxy_url_list else None,
        "oai-device-id": str(uuid.uuid4())
    }
    if ua.device == "desktop" and ua.browser in ("chrome", "edge"):
        # Client-hint headers only exist for desktop Chromium browsers.
        fp["sec-ch-ua-platform"] = ua.ch.platform
        fp["sec-ch-ua"] = ua.ch.brands
        fp["sec-ch-ua-mobile"] = ua.ch.mobile

    if not req_token:
        # Anonymous request: nothing to cache.
        return fp
    globals.fp_map[req_token] = fp
    _save_fp_map()
    return fp
|
chatgpt/proofofWork.py
ADDED
@@ -0,0 +1,504 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import hashlib
|
2 |
+
import json
|
3 |
+
import random
|
4 |
+
import re
|
5 |
+
import time
|
6 |
+
import uuid
|
7 |
+
from datetime import datetime, timedelta, timezone
|
8 |
+
from html.parser import HTMLParser
|
9 |
+
|
10 |
+
import pybase64
|
11 |
+
|
12 |
+
from utils.Logger import logger
|
13 |
+
from utils.configs import conversation_only
|
14 |
+
|
15 |
+
cores = [8, 16, 24, 32]
|
16 |
+
timeLayout = "%a %b %d %Y %H:%M:%S"
|
17 |
+
|
18 |
+
cached_scripts = []
|
19 |
+
cached_dpl = ""
|
20 |
+
cached_time = 0
|
21 |
+
cached_require_proof = ""
|
22 |
+
|
23 |
+
navigator_key = [
|
24 |
+
"registerProtocolHandler−function registerProtocolHandler() { [native code] }",
|
25 |
+
"storage−[object StorageManager]",
|
26 |
+
"locks−[object LockManager]",
|
27 |
+
"appCodeName−Mozilla",
|
28 |
+
"permissions−[object Permissions]",
|
29 |
+
"share−function share() { [native code] }",
|
30 |
+
"webdriver−false",
|
31 |
+
"managed−[object NavigatorManagedData]",
|
32 |
+
"canShare−function canShare() { [native code] }",
|
33 |
+
"vendor−Google Inc.",
|
34 |
+
"vendor−Google Inc.",
|
35 |
+
"mediaDevices−[object MediaDevices]",
|
36 |
+
"vibrate−function vibrate() { [native code] }",
|
37 |
+
"storageBuckets−[object StorageBucketManager]",
|
38 |
+
"mediaCapabilities−[object MediaCapabilities]",
|
39 |
+
"getGamepads−function getGamepads() { [native code] }",
|
40 |
+
"bluetooth−[object Bluetooth]",
|
41 |
+
"share−function share() { [native code] }",
|
42 |
+
"cookieEnabled−true",
|
43 |
+
"virtualKeyboard−[object VirtualKeyboard]",
|
44 |
+
"product−Gecko",
|
45 |
+
"mediaDevices−[object MediaDevices]",
|
46 |
+
"canShare−function canShare() { [native code] }",
|
47 |
+
"getGamepads−function getGamepads() { [native code] }",
|
48 |
+
"product−Gecko",
|
49 |
+
"xr−[object XRSystem]",
|
50 |
+
"clipboard−[object Clipboard]",
|
51 |
+
"storageBuckets−[object StorageBucketManager]",
|
52 |
+
"unregisterProtocolHandler−function unregisterProtocolHandler() { [native code] }",
|
53 |
+
"productSub−20030107",
|
54 |
+
"login−[object NavigatorLogin]",
|
55 |
+
"vendorSub−",
|
56 |
+
"login−[object NavigatorLogin]",
|
57 |
+
"getInstalledRelatedApps−function getInstalledRelatedApps() { [native code] }",
|
58 |
+
"mediaDevices−[object MediaDevices]",
|
59 |
+
"locks−[object LockManager]",
|
60 |
+
"webkitGetUserMedia−function webkitGetUserMedia() { [native code] }",
|
61 |
+
"vendor−Google Inc.",
|
62 |
+
"xr−[object XRSystem]",
|
63 |
+
"mediaDevices−[object MediaDevices]",
|
64 |
+
"virtualKeyboard−[object VirtualKeyboard]",
|
65 |
+
"virtualKeyboard−[object VirtualKeyboard]",
|
66 |
+
"appName−Netscape",
|
67 |
+
"storageBuckets−[object StorageBucketManager]",
|
68 |
+
"presentation−[object Presentation]",
|
69 |
+
"onLine−true",
|
70 |
+
"mimeTypes−[object MimeTypeArray]",
|
71 |
+
"credentials−[object CredentialsContainer]",
|
72 |
+
"presentation−[object Presentation]",
|
73 |
+
"getGamepads−function getGamepads() { [native code] }",
|
74 |
+
"vendorSub−",
|
75 |
+
"virtualKeyboard−[object VirtualKeyboard]",
|
76 |
+
"serviceWorker−[object ServiceWorkerContainer]",
|
77 |
+
"xr−[object XRSystem]",
|
78 |
+
"product−Gecko",
|
79 |
+
"keyboard−[object Keyboard]",
|
80 |
+
"gpu−[object GPU]",
|
81 |
+
"getInstalledRelatedApps−function getInstalledRelatedApps() { [native code] }",
|
82 |
+
"webkitPersistentStorage−[object DeprecatedStorageQuota]",
|
83 |
+
"doNotTrack",
|
84 |
+
"clearAppBadge−function clearAppBadge() { [native code] }",
|
85 |
+
"presentation−[object Presentation]",
|
86 |
+
"serial−[object Serial]",
|
87 |
+
"locks−[object LockManager]",
|
88 |
+
"requestMIDIAccess−function requestMIDIAccess() { [native code] }",
|
89 |
+
"locks−[object LockManager]",
|
90 |
+
"requestMediaKeySystemAccess−function requestMediaKeySystemAccess() { [native code] }",
|
91 |
+
"vendor−Google Inc.",
|
92 |
+
"pdfViewerEnabled−true",
|
93 |
+
"language−zh-CN",
|
94 |
+
"setAppBadge−function setAppBadge() { [native code] }",
|
95 |
+
"geolocation−[object Geolocation]",
|
96 |
+
"userAgentData−[object NavigatorUAData]",
|
97 |
+
"mediaCapabilities−[object MediaCapabilities]",
|
98 |
+
"requestMIDIAccess−function requestMIDIAccess() { [native code] }",
|
99 |
+
"getUserMedia−function getUserMedia() { [native code] }",
|
100 |
+
"mediaDevices−[object MediaDevices]",
|
101 |
+
"webkitPersistentStorage−[object DeprecatedStorageQuota]",
|
102 |
+
"sendBeacon−function sendBeacon() { [native code] }",
|
103 |
+
"hardwareConcurrency−32",
|
104 |
+
"credentials−[object CredentialsContainer]",
|
105 |
+
"storage−[object StorageManager]",
|
106 |
+
"cookieEnabled−true",
|
107 |
+
"pdfViewerEnabled−true",
|
108 |
+
"windowControlsOverlay−[object WindowControlsOverlay]",
|
109 |
+
"scheduling−[object Scheduling]",
|
110 |
+
"pdfViewerEnabled−true",
|
111 |
+
"hardwareConcurrency−32",
|
112 |
+
"xr−[object XRSystem]",
|
113 |
+
"webdriver−false",
|
114 |
+
"getInstalledRelatedApps−function getInstalledRelatedApps() { [native code] }",
|
115 |
+
"getInstalledRelatedApps−function getInstalledRelatedApps() { [native code] }",
|
116 |
+
"bluetooth−[object Bluetooth]"
|
117 |
+
]
|
118 |
+
document_key = ['_reactListeningo743lnnpvdg', 'location']
|
119 |
+
window_key = [
|
120 |
+
"0",
|
121 |
+
"window",
|
122 |
+
"self",
|
123 |
+
"document",
|
124 |
+
"name",
|
125 |
+
"location",
|
126 |
+
"customElements",
|
127 |
+
"history",
|
128 |
+
"navigation",
|
129 |
+
"locationbar",
|
130 |
+
"menubar",
|
131 |
+
"personalbar",
|
132 |
+
"scrollbars",
|
133 |
+
"statusbar",
|
134 |
+
"toolbar",
|
135 |
+
"status",
|
136 |
+
"closed",
|
137 |
+
"frames",
|
138 |
+
"length",
|
139 |
+
"top",
|
140 |
+
"opener",
|
141 |
+
"parent",
|
142 |
+
"frameElement",
|
143 |
+
"navigator",
|
144 |
+
"origin",
|
145 |
+
"external",
|
146 |
+
"screen",
|
147 |
+
"innerWidth",
|
148 |
+
"innerHeight",
|
149 |
+
"scrollX",
|
150 |
+
"pageXOffset",
|
151 |
+
"scrollY",
|
152 |
+
"pageYOffset",
|
153 |
+
"visualViewport",
|
154 |
+
"screenX",
|
155 |
+
"screenY",
|
156 |
+
"outerWidth",
|
157 |
+
"outerHeight",
|
158 |
+
"devicePixelRatio",
|
159 |
+
"clientInformation",
|
160 |
+
"screenLeft",
|
161 |
+
"screenTop",
|
162 |
+
"styleMedia",
|
163 |
+
"onsearch",
|
164 |
+
"isSecureContext",
|
165 |
+
"trustedTypes",
|
166 |
+
"performance",
|
167 |
+
"onappinstalled",
|
168 |
+
"onbeforeinstallprompt",
|
169 |
+
"crypto",
|
170 |
+
"indexedDB",
|
171 |
+
"sessionStorage",
|
172 |
+
"localStorage",
|
173 |
+
"onbeforexrselect",
|
174 |
+
"onabort",
|
175 |
+
"onbeforeinput",
|
176 |
+
"onbeforematch",
|
177 |
+
"onbeforetoggle",
|
178 |
+
"onblur",
|
179 |
+
"oncancel",
|
180 |
+
"oncanplay",
|
181 |
+
"oncanplaythrough",
|
182 |
+
"onchange",
|
183 |
+
"onclick",
|
184 |
+
"onclose",
|
185 |
+
"oncontentvisibilityautostatechange",
|
186 |
+
"oncontextlost",
|
187 |
+
"oncontextmenu",
|
188 |
+
"oncontextrestored",
|
189 |
+
"oncuechange",
|
190 |
+
"ondblclick",
|
191 |
+
"ondrag",
|
192 |
+
"ondragend",
|
193 |
+
"ondragenter",
|
194 |
+
"ondragleave",
|
195 |
+
"ondragover",
|
196 |
+
"ondragstart",
|
197 |
+
"ondrop",
|
198 |
+
"ondurationchange",
|
199 |
+
"onemptied",
|
200 |
+
"onended",
|
201 |
+
"onerror",
|
202 |
+
"onfocus",
|
203 |
+
"onformdata",
|
204 |
+
"oninput",
|
205 |
+
"oninvalid",
|
206 |
+
"onkeydown",
|
207 |
+
"onkeypress",
|
208 |
+
"onkeyup",
|
209 |
+
"onload",
|
210 |
+
"onloadeddata",
|
211 |
+
"onloadedmetadata",
|
212 |
+
"onloadstart",
|
213 |
+
"onmousedown",
|
214 |
+
"onmouseenter",
|
215 |
+
"onmouseleave",
|
216 |
+
"onmousemove",
|
217 |
+
"onmouseout",
|
218 |
+
"onmouseover",
|
219 |
+
"onmouseup",
|
220 |
+
"onmousewheel",
|
221 |
+
"onpause",
|
222 |
+
"onplay",
|
223 |
+
"onplaying",
|
224 |
+
"onprogress",
|
225 |
+
"onratechange",
|
226 |
+
"onreset",
|
227 |
+
"onresize",
|
228 |
+
"onscroll",
|
229 |
+
"onsecuritypolicyviolation",
|
230 |
+
"onseeked",
|
231 |
+
"onseeking",
|
232 |
+
"onselect",
|
233 |
+
"onslotchange",
|
234 |
+
"onstalled",
|
235 |
+
"onsubmit",
|
236 |
+
"onsuspend",
|
237 |
+
"ontimeupdate",
|
238 |
+
"ontoggle",
|
239 |
+
"onvolumechange",
|
240 |
+
"onwaiting",
|
241 |
+
"onwebkitanimationend",
|
242 |
+
"onwebkitanimationiteration",
|
243 |
+
"onwebkitanimationstart",
|
244 |
+
"onwebkittransitionend",
|
245 |
+
"onwheel",
|
246 |
+
"onauxclick",
|
247 |
+
"ongotpointercapture",
|
248 |
+
"onlostpointercapture",
|
249 |
+
"onpointerdown",
|
250 |
+
"onpointermove",
|
251 |
+
"onpointerrawupdate",
|
252 |
+
"onpointerup",
|
253 |
+
"onpointercancel",
|
254 |
+
"onpointerover",
|
255 |
+
"onpointerout",
|
256 |
+
"onpointerenter",
|
257 |
+
"onpointerleave",
|
258 |
+
"onselectstart",
|
259 |
+
"onselectionchange",
|
260 |
+
"onanimationend",
|
261 |
+
"onanimationiteration",
|
262 |
+
"onanimationstart",
|
263 |
+
"ontransitionrun",
|
264 |
+
"ontransitionstart",
|
265 |
+
"ontransitionend",
|
266 |
+
"ontransitioncancel",
|
267 |
+
"onafterprint",
|
268 |
+
"onbeforeprint",
|
269 |
+
"onbeforeunload",
|
270 |
+
"onhashchange",
|
271 |
+
"onlanguagechange",
|
272 |
+
"onmessage",
|
273 |
+
"onmessageerror",
|
274 |
+
"onoffline",
|
275 |
+
"ononline",
|
276 |
+
"onpagehide",
|
277 |
+
"onpageshow",
|
278 |
+
"onpopstate",
|
279 |
+
"onrejectionhandled",
|
280 |
+
"onstorage",
|
281 |
+
"onunhandledrejection",
|
282 |
+
"onunload",
|
283 |
+
"crossOriginIsolated",
|
284 |
+
"scheduler",
|
285 |
+
"alert",
|
286 |
+
"atob",
|
287 |
+
"blur",
|
288 |
+
"btoa",
|
289 |
+
"cancelAnimationFrame",
|
290 |
+
"cancelIdleCallback",
|
291 |
+
"captureEvents",
|
292 |
+
"clearInterval",
|
293 |
+
"clearTimeout",
|
294 |
+
"close",
|
295 |
+
"confirm",
|
296 |
+
"createImageBitmap",
|
297 |
+
"fetch",
|
298 |
+
"find",
|
299 |
+
"focus",
|
300 |
+
"getComputedStyle",
|
301 |
+
"getSelection",
|
302 |
+
"matchMedia",
|
303 |
+
"moveBy",
|
304 |
+
"moveTo",
|
305 |
+
"open",
|
306 |
+
"postMessage",
|
307 |
+
"print",
|
308 |
+
"prompt",
|
309 |
+
"queueMicrotask",
|
310 |
+
"releaseEvents",
|
311 |
+
"reportError",
|
312 |
+
"requestAnimationFrame",
|
313 |
+
"requestIdleCallback",
|
314 |
+
"resizeBy",
|
315 |
+
"resizeTo",
|
316 |
+
"scroll",
|
317 |
+
"scrollBy",
|
318 |
+
"scrollTo",
|
319 |
+
"setInterval",
|
320 |
+
"setTimeout",
|
321 |
+
"stop",
|
322 |
+
"structuredClone",
|
323 |
+
"webkitCancelAnimationFrame",
|
324 |
+
"webkitRequestAnimationFrame",
|
325 |
+
"chrome",
|
326 |
+
"caches",
|
327 |
+
"cookieStore",
|
328 |
+
"ondevicemotion",
|
329 |
+
"ondeviceorientation",
|
330 |
+
"ondeviceorientationabsolute",
|
331 |
+
"launchQueue",
|
332 |
+
"documentPictureInPicture",
|
333 |
+
"getScreenDetails",
|
334 |
+
"queryLocalFonts",
|
335 |
+
"showDirectoryPicker",
|
336 |
+
"showOpenFilePicker",
|
337 |
+
"showSaveFilePicker",
|
338 |
+
"originAgentCluster",
|
339 |
+
"onpageswap",
|
340 |
+
"onpagereveal",
|
341 |
+
"credentialless",
|
342 |
+
"speechSynthesis",
|
343 |
+
"onscrollend",
|
344 |
+
"webkitRequestFileSystem",
|
345 |
+
"webkitResolveLocalFileSystemURL",
|
346 |
+
"sendMsgToSolverCS",
|
347 |
+
"webpackChunk_N_E",
|
348 |
+
"__next_set_public_path__",
|
349 |
+
"next",
|
350 |
+
"__NEXT_DATA__",
|
351 |
+
"__SSG_MANIFEST_CB",
|
352 |
+
"__NEXT_P",
|
353 |
+
"_N_E",
|
354 |
+
"regeneratorRuntime",
|
355 |
+
"__REACT_INTL_CONTEXT__",
|
356 |
+
"DD_RUM",
|
357 |
+
"_",
|
358 |
+
"filterCSS",
|
359 |
+
"filterXSS",
|
360 |
+
"__SEGMENT_INSPECTOR__",
|
361 |
+
"__NEXT_PRELOADREADY",
|
362 |
+
"Intercom",
|
363 |
+
"__MIDDLEWARE_MATCHERS",
|
364 |
+
"__STATSIG_SDK__",
|
365 |
+
"__STATSIG_JS_SDK__",
|
366 |
+
"__STATSIG_RERENDER_OVERRIDE__",
|
367 |
+
"_oaiHandleSessionExpired",
|
368 |
+
"__BUILD_MANIFEST",
|
369 |
+
"__SSG_MANIFEST",
|
370 |
+
"__intercomAssignLocation",
|
371 |
+
"__intercomReloadLocation"
|
372 |
+
]
|
373 |
+
|
374 |
+
|
375 |
+
class ScriptSrcParser(HTMLParser):
    """HTML parser that records every <script src> URL and, when a src matches
    the ``c/<build>/_`` pattern, caches it as the current dpl marker."""

    def handle_starttag(self, tag, attrs):
        global cached_scripts, cached_dpl, cached_time
        if tag != "script":
            return
        attrs_map = dict(attrs)
        if "src" not in attrs_map:
            return
        src = attrs_map["src"]
        cached_scripts.append(src)
        dpl_match = re.search(r"c/[^/]*/_", src)
        if dpl_match:
            # Remember the deployment marker and when we captured it.
            cached_dpl = dpl_match.group(0)
            cached_time = int(time.time())
|
387 |
+
|
388 |
+
|
389 |
+
def get_data_build_from_html(html_content):
    """Populate the module-level script/dpl caches from a ChatGPT HTML page.

    Script URLs are harvested via ScriptSrcParser; if no dpl was found there,
    the <html data-build="..."> attribute is used as a fallback.
    """
    global cached_scripts, cached_dpl, cached_time
    ScriptSrcParser().feed(html_content)
    if not cached_scripts:
        # No <script src> tags at all: fall back to the sentinel SDK script.
        cached_scripts.append("https://chatgpt.com/backend-api/sentinel/sdk.js")
    if not cached_dpl:
        build_match = re.search(r'<html[^>]*data-build="([^"]*)"', html_content)
        if build_match:
            cached_dpl = build_match.group(1)
            cached_time = int(time.time())
    logger.info(f"Found dpl: {cached_dpl}")
|
402 |
+
|
403 |
+
|
404 |
+
async def get_dpl(service):
    """Refresh the cached script list / dpl marker for *service*, at most every 15 min.

    Returns True when a dpl is (or does not need to be) cached; False when
    fetching the ChatGPT landing page failed. Failures are cached too, so the
    endpoint is not retried for the 15-minute window.
    """
    global cached_scripts, cached_dpl, cached_time
    # Serve from cache while it is younger than 15 minutes.
    # NOTE(review): this also treats a cached *failure* (cached_dpl is None)
    # as fresh — presumably intentional negative caching; confirm.
    if int(time.time()) - cached_time < 15 * 60:
        return True
    headers = service.base_headers.copy()
    cached_scripts = []
    cached_dpl = ""
    try:
        if conversation_only:
            # Conversation-only mode never needs the sentinel scripts.
            return True
        r = await service.s.get(f"{service.host_url}/", headers=headers, timeout=5)
        r.raise_for_status()
        get_data_build_from_html(r.text)
        if not cached_dpl:
            raise Exception("No Cached DPL")
        else:
            return True
    except Exception as e:
        logger.info(f"Failed to get dpl: {e}")
        # Cache the failure so we do not hammer the endpoint for 15 minutes.
        cached_dpl = None
        cached_time = int(time.time())
        return False
|
426 |
+
|
427 |
+
|
428 |
+
def get_parse_time():
    """Return the current time formatted like a JS Date string, pinned to UTC-5."""
    eastern = timezone(timedelta(hours=-5))
    stamp = datetime.now(eastern).strftime("%a %b %d %Y %H:%M:%S")
    return f"{stamp} GMT-0500 (Eastern Standard Time)"
|
431 |
+
|
432 |
+
|
433 |
+
def get_config(user_agent):
    """Assemble the 17-element browser fingerprint config consumed by the
    proof-of-work payload. Element order is significant: generate_answer
    splices counters into fixed positions (index 3 and 9)."""
    fingerprint = [
        random.randint(1080, 1440 + 1080),   # randomized screen metric
        get_parse_time(),                    # JS-style local timestamp
        4294705152,
        0,                                   # overwritten per-iteration by the solver
        user_agent,
        "",
        cached_dpl,                          # deployment marker scraped from the page
        "en-US",
        "en-US,es-US,en,es",
        0,                                   # overwritten per-iteration by the solver
        random.choice(navigator_key),
        random.choice(document_key),
        random.choice(window_key),
        time.perf_counter(),
        str(uuid.uuid4()),
        "",
        random.choice(cores),
    ]
    return fingerprint
|
454 |
+
|
455 |
+
|
456 |
+
def get_answer_token(seed, diff, config):
    """Solve the sentinel proof-of-work and return ("gAAAAAB" + answer, solved)."""
    started = time.time()
    answer, solved = generate_answer(seed, diff, config)
    elapsed_ms = int((time.time() - started) * 1e6) / 1e3
    logger.info(f'diff: {diff}, time: {elapsed_ms}ms, solved: {solved}')
    return "gAAAAAB" + answer, solved
|
462 |
+
|
463 |
+
|
464 |
+
def generate_answer(seed, diff, config):
    """Brute-force the sentinel proof-of-work.

    Splices an incrementing counter i (and i >> 1) into fixed slots of the
    JSON-encoded config, base64-encodes the result, and searches for a
    sha3-512(seed + payload) digest whose prefix is <= the hex target *diff*.

    Returns (payload, solved): solved is False when no answer was found in
    500000 iterations, in which case a fixed fallback payload is returned.
    """
    diff_len = len(diff)
    seed_encoded = seed.encode()
    # Pre-serialize the invariant parts of the config once; the counters are
    # spliced between them as raw bytes each iteration (slots 3 and 9).
    static_config_part1 = (json.dumps(config[:3], separators=(',', ':'), ensure_ascii=False)[:-1] + ',').encode()
    static_config_part2 = (',' + json.dumps(config[4:9], separators=(',', ':'), ensure_ascii=False)[1:-1] + ',').encode()
    static_config_part3 = (',' + json.dumps(config[10:], separators=(',', ':'), ensure_ascii=False)[1:]).encode()

    # NOTE(review): diff_len counts hex characters, but target_diff holds
    # diff_len/2 bytes — so hash_value[:diff_len] compares more bytes than the
    # target, making the check slightly stricter than a plain prefix <=.
    # Harmless in practice (server re-validates), but confirm intent.
    target_diff = bytes.fromhex(diff)

    for i in range(500000):
        dynamic_json_i = str(i).encode()
        dynamic_json_j = str(i >> 1).encode()
        final_json_bytes = static_config_part1 + dynamic_json_i + static_config_part2 + dynamic_json_j + static_config_part3
        base_encode = pybase64.b64encode(final_json_bytes)
        hash_value = hashlib.sha3_512(seed_encoded + base_encode).digest()
        if hash_value[:diff_len] <= target_diff:
            return base_encode.decode(), True

    # Fallback token recognized server-side as an unsolved attempt.
    return "wQ8Lk5FbGpA2NcR9dShT6gYjU7VxZ4D" + pybase64.b64encode(f'"{seed}"'.encode()).decode(), False
|
483 |
+
|
484 |
+
|
485 |
+
def get_requirements_token(config):
    """Produce a 'gAAAAAC'-prefixed requirements token from a random seed
    at the easy fixed difficulty "0fffff" (solved flag intentionally ignored)."""
    require, _solved = generate_answer(format(random.random()), "0fffff", config)
    return 'gAAAAAC' + require
|
488 |
+
|
489 |
+
|
490 |
+
if __name__ == "__main__":
|
491 |
+
# cached_scripts.append(
|
492 |
+
# "https://cdn.oaistatic.com/_next/static/cXh69klOLzS0Gy2joLDRS/_ssgManifest.js?dpl=453ebaec0d44c2decab71692e1bfe39be35a24b3")
|
493 |
+
# cached_dpl = "453ebaec0d44c2decab71692e1bfe39be35a24b3"
|
494 |
+
# cached_time = int(time.time())
|
495 |
+
# for i in range(10):
|
496 |
+
# seed = format(random.random())
|
497 |
+
# diff = "000032"
|
498 |
+
# config = get_config("Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome")
|
499 |
+
# answer = get_answer_token(seed, diff, config)
|
500 |
+
cached_scripts.append(
|
501 |
+
"https://cdn.oaistatic.com/_next/static/cXh69klOLzS0Gy2joLDRS/_ssgManifest.js?dpl=453ebaec0d44c2decab71692e1bfe39be35a24b3")
|
502 |
+
cached_dpl = "dpl=453ebaec0d44c2decab71692e1bfe39be35a24b3"
|
503 |
+
config = get_config("Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36")
|
504 |
+
get_requirements_token(config)
|
chatgpt/refreshToken.py
ADDED
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
import random
|
3 |
+
import time
|
4 |
+
|
5 |
+
from fastapi import HTTPException
|
6 |
+
|
7 |
+
from utils.Client import Client
|
8 |
+
from utils.Logger import logger
|
9 |
+
from utils.configs import proxy_url_list
|
10 |
+
import utils.globals as globals
|
11 |
+
|
12 |
+
|
13 |
+
async def rt2ac(refresh_token, force_refresh=False):
    """Exchange a refresh_token for an access_token, with a 5-day cache.

    Returns the cached access_token when its entry is younger than 5 days
    (unless *force_refresh* is set); otherwise refreshes via OpenAI, persists
    the new token to REFRESH_MAP_FILE, and returns it.

    Raises HTTPException when the upstream refresh fails (propagated from
    chat_refresh). The original re-wrapped the exception, which dropped its
    `headers` attribute; letting it propagate is equivalent-or-better.
    """
    cache_age = int(time.time()) - globals.refresh_map.get(refresh_token, {}).get("timestamp", 0)
    if not force_refresh and refresh_token in globals.refresh_map and cache_age < 5 * 24 * 60 * 60:
        access_token = globals.refresh_map[refresh_token]["token"]
        # logger.info(f"refresh_token -> access_token from cache")
        return access_token
    access_token = await chat_refresh(refresh_token)
    globals.refresh_map[refresh_token] = {"token": access_token, "timestamp": int(time.time())}
    with open(globals.REFRESH_MAP_FILE, "w") as f:
        json.dump(globals.refresh_map, f, indent=4)
    logger.info(f"refresh_token -> access_token with openai: {access_token}")
    return access_token
|
28 |
+
|
29 |
+
|
30 |
+
async def chat_refresh(refresh_token):
    """POST the refresh_token to auth0.openai.com and return a new access_token.

    Permanently dead tokens (invalid_grant / access_denied responses) are
    appended to the in-memory error-token list and the error-token file.
    Any failure is logged and surfaced to the caller as HTTPException(500).
    """
    data = {
        "client_id": "pdlLIX2Y72MIl2rhLhTE9VV9bN905kBh",  # presumably the OpenAI iOS app client id — confirm
        "grant_type": "refresh_token",
        "redirect_uri": "com.openai.chat://auth0.openai.com/ios/com.openai.chat/callback",
        "refresh_token": refresh_token
    }
    client = Client(proxy=random.choice(proxy_url_list) if proxy_url_list else None)
    try:
        r = await client.post("https://auth0.openai.com/oauth/token", json=data, timeout=5)
        if r.status_code == 200:
            access_token = r.json()['access_token']
            return access_token
        else:
            if "invalid_grant" in r.text or "access_denied" in r.text:
                # Token is permanently dead: remember it so callers skip it.
                if refresh_token not in globals.error_token_list:
                    globals.error_token_list.append(refresh_token)
                    with open(globals.ERROR_TOKENS_FILE, "a", encoding="utf-8") as f:
                        f.write(refresh_token + "\n")
                # Deliberately raised into the except-clause below.
                raise Exception(r.text)
            else:
                raise Exception(r.text[:300])
    except Exception as e:
        # Both raises above land here: log the detail, return a generic 500.
        logger.error(f"Failed to refresh access_token `{refresh_token}`: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Failed to refresh access_token.")
    finally:
        await client.close()
        del client
|
chatgpt/turnstile.py
ADDED
@@ -0,0 +1,268 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pybase64
|
2 |
+
import json
|
3 |
+
import random
|
4 |
+
import time
|
5 |
+
from typing import Any, Callable, Dict, List, Union
|
6 |
+
|
7 |
+
|
8 |
+
class OrderedMap:
    """Mapping that remembers first-insertion order of keys for JSON output."""

    def __init__(self):
        self.keys = []
        self.values = {}

    def add(self, key: str, value: Any):
        """Set *key* to *value*; order is fixed by the first add of each key."""
        is_new_key = key not in self.values
        if is_new_key:
            self.keys.append(key)
        self.values[key] = value

    def to_json(self):
        """Serialize all entries as JSON in first-insertion order."""
        ordered = {key: self.values[key] for key in self.keys}
        return json.dumps(ordered)
|
20 |
+
|
21 |
+
|
22 |
+
TurnTokenList = List[List[Any]]
|
23 |
+
FloatMap = Dict[float, Any]
|
24 |
+
StringMap = Dict[str, Any]
|
25 |
+
FuncType = Callable[..., Any]
|
26 |
+
|
27 |
+
|
28 |
+
def get_turnstile_token(dx: str, p: str) -> Union[str, None]:
    """Base64-decode *dx* and XOR-decrypt it with key *p*; None on any failure."""
    try:
        plaintext = pybase64.b64decode(dx).decode()
        return process_turnstile_token(plaintext, p)
    except Exception as e:
        print(f"Error in get_turnstile_token: {e}")
        return None
|
35 |
+
|
36 |
+
|
37 |
+
def process_turnstile_token(dx: str, p: str) -> str:
    """XOR each character of *dx* against the repeating key *p*.

    An empty key is the identity transform. The operation is its own inverse.
    """
    if not p:
        return dx
    key_len = len(p)
    return ''.join(chr(ord(ch) ^ ord(p[idx % key_len])) for idx, ch in enumerate(dx))
|
46 |
+
|
47 |
+
|
48 |
+
def is_slice(input_val: Any) -> bool:
    """Return True when the value is a Python list or tuple (a JS-array stand-in)."""
    return isinstance(input_val, list) or isinstance(input_val, tuple)
|
50 |
+
|
51 |
+
|
52 |
+
def is_float(input_val: Any) -> bool:
    """Return True only for genuine Python floats (ints are excluded)."""
    return isinstance(input_val, float)
|
54 |
+
|
55 |
+
|
56 |
+
def is_string(input_val: Any) -> bool:
    """Return True only for str values (bytes are excluded)."""
    return isinstance(input_val, str)
|
58 |
+
|
59 |
+
|
60 |
+
def to_str(input_val: Any) -> str:
    """Render a value the way the browser-side turnstile script stringifies it.

    None becomes "undefined"; well-known window properties map to fixed JS
    representations; all-string lists join with commas; everything else uses str().
    """
    if input_val is None:
        return "undefined"
    if isinstance(input_val, float):
        return str(input_val)
    if isinstance(input_val, str):
        js_representations = {
            "window.Math": "[object Math]",
            "window.Reflect": "[object Reflect]",
            "window.performance": "[object Performance]",
            "window.localStorage": "[object Storage]",
            "window.Object": "function Object() { [native code] }",
            "window.Reflect.set": "function set() { [native code] }",
            "window.performance.now": "function () { [native code] }",
            "window.Object.create": "function create() { [native code] }",
            "window.Object.keys": "function keys() { [native code] }",
            "window.Math.random": "function random() { [native code] }",
        }
        return js_representations.get(input_val, input_val)
    if isinstance(input_val, list) and all(isinstance(item, str) for item in input_val):
        return ','.join(input_val)
    return str(input_val)
|
83 |
+
|
84 |
+
|
85 |
+
def get_func_map() -> FloatMap:
    """Build the opcode table for the turnstile token VM.

    Keys are opcodes; values are handler closures (or literal values, e.g.
    10 -> "window"). All handlers read and write the shared *process_map*,
    which doubles as both opcode table and register file. Slots 3 (output
    sink), 9 (the token program) and 16 (the key) are filled in later by
    process_turnstile.
    """
    process_map: FloatMap = {}

    # Opcode 1: XOR-combine two registers' stringified contents into e.
    def func_1(e: float, t: float):
        e_str = to_str(process_map[e])
        t_str = to_str(process_map[t])
        res = process_turnstile_token(e_str, t_str)
        process_map[e] = res

    # Opcode 2: load an immediate value into register e.
    def func_2(e: float, t: Any):
        process_map[e] = t

    # Opcode 5: JS-style "+": array push, string concat, or numeric add.
    def func_5(e: float, t: float):
        n = process_map[e]
        tres = process_map[t]
        if is_slice(n):
            nt = n + [tres]
            process_map[e] = nt
        else:
            if is_string(n) or is_string(tres):
                res = to_str(n) + to_str(tres)
            elif is_float(n) and is_float(tres):
                res = n + tres
            else:
                res = "NaN"
            process_map[e] = res

    # Opcode 6: property access "t.n", with a hardcoded URL for
    # window.document.location.
    def func_6(e: float, t: float, n: float):
        tv = process_map[t]
        nv = process_map[n]
        if is_string(tv) and is_string(nv):
            res = f"{tv}.{nv}"
            if res == "window.document.location":
                process_map[e] = "https://chatgpt.com/"
            else:
                process_map[e] = res
        else:
            print("func type 6 error")

    # Opcode 24: property access "t.n" without the location special case.
    def func_24(e: float, t: float, n: float):
        tv = process_map[t]
        nv = process_map[n]
        if is_string(tv) and is_string(nv):
            process_map[e] = f"{tv}.{nv}"
        else:
            print("func type 24 error")

    # Opcode 7: call register e with resolved args (Reflect.set emulated
    # against an OrderedMap); result discarded.
    def func_7(e: float, *args):
        n = [process_map[arg] for arg in args]
        ev = process_map[e]
        if isinstance(ev, str):
            if ev == "window.Reflect.set":
                obj = n[0]
                key_str = str(n[1])
                val = n[2]
                obj.add(key_str, val)
        elif callable(ev):
            ev(*n)

    # Opcode 17: call register t, storing the result in e. Emulates a few
    # browser APIs; performance.now is measured against the module-level
    # start_time set by process_turnstile.
    def func_17(e: float, t: float, *args):
        i = [process_map[arg] for arg in args]
        tv = process_map[t]
        res = None
        if isinstance(tv, str):
            if tv == "window.performance.now":
                current_time = time.time_ns()
                elapsed_ns = current_time - int(start_time * 1e9)
                res = (elapsed_ns + random.random()) / 1e6
            elif tv == "window.Object.create":
                res = OrderedMap()
            elif tv == "window.Object.keys":
                # Canned localStorage key list mimicking a real ChatGPT session.
                if isinstance(i[0], str) and i[0] == "window.localStorage":
                    res = ["STATSIG_LOCAL_STORAGE_INTERNAL_STORE_V4", "STATSIG_LOCAL_STORAGE_STABLE_ID",
                           "client-correlated-secret", "oai/apps/capExpiresAt", "oai-did",
                           "STATSIG_LOCAL_STORAGE_LOGGING_REQUEST", "UiState.isNavigationCollapsed.1"]
            elif tv == "window.Math.random":
                res = random.random()
        elif callable(tv):
            res = tv(*i)
        process_map[e] = res

    # Opcode 8: register-to-register copy.
    def func_8(e: float, t: float):
        process_map[e] = process_map[t]

    # Opcode 14: JSON.parse register t into e.
    def func_14(e: float, t: float):
        tv = process_map[t]
        if is_string(tv):
            token_list = json.loads(tv)
            process_map[e] = token_list
        else:
            print("func type 14 error")

    # Opcode 15: JSON.stringify register t into e.
    def func_15(e: float, t: float):
        tv = process_map[t]
        process_map[e] = json.dumps(tv)

    # Opcode 18: base64-decode register e in place.
    def func_18(e: float):
        ev = process_map[e]
        e_str = to_str(ev)
        decoded = pybase64.b64decode(e_str).decode()
        process_map[e] = decoded

    # Opcode 19: base64-encode register e in place.
    def func_19(e: float):
        ev = process_map[e]
        e_str = to_str(ev)
        encoded = pybase64.b64encode(e_str.encode()).decode()
        process_map[e] = encoded

    # Opcode 20: conditional call — invoke register n with resolved args
    # only when registers e and t compare equal.
    def func_20(e: float, t: float, n: float, *args):
        o = [process_map[arg] for arg in args]
        ev = process_map[e]
        tv = process_map[t]
        if ev == tv:
            nv = process_map[n]
            if callable(nv):
                nv(*o)
        else:
            print("func type 20 error")

    # Opcode 21: no-op.
    def func_21(*args):
        pass

    # Opcode 23: call register t with the *raw* (unresolved) args when
    # register e holds any value.
    def func_23(e: float, t: float, *args):
        i = list(args)
        ev = process_map[e]
        tv = process_map[t]
        if ev is not None:
            if callable(tv):
                tv(*i)

    process_map.update({
        1: func_1, 2: func_2, 5: func_5, 6: func_6, 24: func_24, 7: func_7,
        17: func_17, 8: func_8, 10: "window", 14: func_14, 15: func_15,
        18: func_18, 19: func_19, 20: func_20, 21: func_21, 23: func_23
    })

    return process_map
|
222 |
+
|
223 |
+
start_time = 0
|
224 |
+
|
225 |
+
|
226 |
+
def process_turnstile(dx: str, p: str) -> str:
    """Decrypt the turnstile payload *dx* with key *p* and execute its token
    program, returning the base64-encoded result (empty string on failure)."""
    global start_time
    start_time = time.time()
    decrypted = get_turnstile_token(dx, p)
    if decrypted is None:
        return ""

    instructions = json.loads(decrypted)
    # print(instructions)
    res = ""
    process_map = get_func_map()

    def func_3(e: str):
        nonlocal res
        res = pybase64.b64encode(e.encode()).decode()

    # Per-invocation slots: 3 = output sink, 9 = the program, 16 = the key.
    process_map[3] = func_3
    process_map[9] = instructions
    process_map[16] = p

    for instruction in instructions:
        try:
            opcode = instruction[0]
            operands = instruction[1:]
            handler = process_map.get(opcode)
            if callable(handler):
                handler(*operands)
            # Unknown opcodes are silently skipped, like the JS original.
        except Exception:
            # Individual instruction failures are ignored by design.
            pass

    return res
|
261 |
+
|
262 |
+
|
263 |
+
if __name__ == "__main__":
|
264 |
+
result = process_turnstile(
|
265 |
+
"PBp5bWF1cHlLe1ttQhRfaTdmXEpidGdEYU5JdGJpR3xfHFVuGHVEY0tZVG18Vh54RWJ5CXpxKXl3SUZ7b2FZAWJaTBl6RGQZURh8BndUcRlQVgoYalAca2QUX24ffQZgdVVbbmBrAH9FV08Rb2oVVgBeQVRrWFp5VGZMYWNyMnoSN0FpaQgFT1l1f3h7c1RtcQUqY1kZbFJ5BQRiZEJXS3RvHGtieh9PaBlHaXhVWnVLRUlKdwsdbUtbKGFaAlN4a0V/emUJe2J2dl9BZkAxZWU/WGocRUBnc3VyT3F4WkJmYSthdBIGf0RwQ2FjAUBnd3ZEelgbVUEIDAJjS1VZbU9sSWFjfk55J2lZFV0HWX1cbVV5dWdAfkFIAVQVbloUXQtYaAR+VXhUF1BZdG4CBHRyK21AG1JaHhBFaBwCWUlocyQGVT4NBzNON2ASFVtXeQRET1kARndjUEBDT2RKeQN7RmJjeVtvZGpDeWJ1EHxafVd+Wk1AbzdLVTpafkd9dWZKeARecGJrS0xcenZIEEJQOmcFa01menFOeVRiSGFZC1JnWUA0SU08QGgeDFFgY34YWXAdZHYaHRhANFRMOV0CZmBfVExTWh9lZlVpSnx6eQURb2poa2RkQVJ0cmF0bwJbQgB6RlRbQHRQaQFKBHtENwVDSWpgHAlbTU1hXEpwdBh2eBlNY3l2UEhnblx7AmpaQ08JDDAzJUVAbn5IA2d8XX5ZFVlrYWhSXWlYQlEdZlQ/QUwuYwJgTG5GZghSRHdCYk1CWWBjclp0aWo3TWMSQmFaaAdge05FbmFhH3hxCFZuIX1BY01WVW5ABx5jfG1ZbjcZEiwwPFYQVm0sdHV8Xnl7alRuemgKZUwICklweW1heHR5Q3UqYVoSR3BCaldIc3Z8SmJOS212CAY5AmMkYmMaRn5UXEthZFsHYFx7ZHRnYV5tcFBZeHocQxUXXU0bYk0VFUZ0ZgFrSWcMRksCAwdJEBBncF12fGUVdnFNQnl4ZQB9WUclYGMRe04TQUZMf0FEbEthW357HEN2aVhAdHAMH0NPdWFicm1YbzNRBSkWMDUAOVdXbBlfRz51ah54YG5iVX9sR2t6RF1pR1RGU20MABBWQy55T3dQfmlUfmFrA35gY2AdDiBWMWVlP1hqHEVAZ3NzfE9/c1pCZWErYXQSB2BKcENjew1baXB9Rm1aG1VBCAkJY01aWW1NbklgZH5Oek1rTX9FFEB7RHNGEG9pKH1eRgFSZGJJdkcMQHUSY0IRQRkzUmFgBG90cklvVwNZThIHQXYABjFJaApCWh1qUEhnWVpiBHxDRDlAHg8kFVcCY1dCUk8VRm9obEN9e21EdnluWxN7eWt8RnFOekRTRXZKXkNPWH40YGMRXHwfRHZ7Z1JKS2R9XG1XR09qCGlaZmZ/QXwnfloWTQxIflxbSVNdSUZgHBRLKCwpQwwmXzB2NFRMOVxUTFNfH3BoRVhfWkcBYghVaSh0ZWMFeG9qBWp5eENNeGNldncHR0wBezVPTjdlSGcOTndjVkAUVl99YQFkRUE2YlNKe3ppeml2V2lvYkhGHjtbNHIALywsMScPEjEFO3Q1MQ0UGDYvK148ETYxIzEcD0gzchNcLSs+LAJxJiEQKBd5MCsXCRclFA0gBRg3axk1HTkBGyoUPRhwCwI2OAIRB2gUBRcjATt6ORQ9JDANOHFlEQITIC8VOS4GAC49GDscBBQMNQ4hDQtQZHYMHmk3BRFHeHZvcXNvd01+WXxPFF9pN2ZaSmR3Z0RkQkl7YmlHbzMsSS8HEy4PPggxGAAYBBcuJREBEQA7LAMANgEiNiZgFR5Mchs0eH83ERFsGCceZTESe2MeEgQSGwgXIgIbb38FFBAWEC1GFC42OQ0CCwcudSIpOwY6MRw7IjwYAgAYD3UbOA8AaHoHPiUkBgQmTA4FUxgAOCoJKxNmVSoANDIzAjdlDxA6ISIOKhQDEhwLPS82IT4CUFIsOyIwLD4
+BBsDAww1AnMqHAIlMiMTGT0oAQlUE3QDQhIUACMxDwhGLxEXHQsSIV0FLgMaAgJ2LgsEHyEPLBcKOBtfUhg9MiAXPT5fHhA1Wg8+BxoPLgYcGS0WRSsELjIZKg8EJw4lFQAoUCcTcxASLS9BOTsZD3ERGRUhOD1YUjJxWBEBdnc9PwkQNytyED0zAQtaG3Y2ACsWXSsoPV4+DBQ2DyQ+bg0MHxVHKhAqNh8QPVkNET5fAis5Jh0uGxACKA8kOyo6IBkHIgkKdx0sAgA8SAQVHCkCLwcoBnQHGRAeAxAXOQAdKxhrNxMLJQYrKwAxHnFcOA4HIlEEAVkVDigqAwMoORQQKFkaOy0pISMoRmYDPyFLCRIqVhwCImITET04Gx8QPTMWWRQDcgstAioLGSkBTjw7ECYLeSgraxFoazw2CQcrJgU1cQ0fAB4YEykpIQMEPgJ0NUY0Lhc8IBEEWQtyNSkeECEmHitRFhsULgUrASkfO3E6XDsqLTAVcg8pFCwUaT8rPiMALzskFQQNJBkfKgUxBwscAj4YWhYHDxoXEBRwHgUUMx4gCxsCGBRJAz5yABsCAxIPFSo2AQILLSs7NS4EAGEnFBANJBgTOV0FLWJSKAUQeRkDKyAjCjYqIwEUBwAUPT5iBgohDzYmBAEBJS4pCSspGgUQBDsuD3wvKFd7HwE/EQ8ZFQgRICYEAgUuRhovHFYdM15eNwIgZBgmBVIoJGBnACRXChIKQR8lDVh2CicfKTIBcxwzNionIg4PEVI0FyMQOTkaABI3JSoAByVTKAItJn1ULjcEOG4gBjoqDnAQDjsGHzA2cF92CTIlAhMdchoJABA6KQEyajcgBAM+IhwyE292OTQ0IzUsAVY8EBcxMRxoKgEhBRQSGTMLfQsgFDp1PDQsCgEFKAkIASA8EhF4IgpjIzMJJC4WcyYcEQkPPSMBHlUSfFkuPCQnKiMaAGYWEC80EQIeex9wJjszCSQMFg4iDDcvVxMEBR17Knw0OnMVRyc4fj9ROQpiABoWFxAscR0Na3gBHWdyPjcOBCMleBQgKR4rLQViBhcLGnEgDDZ4ACoPJhQQIH4nHBoDNhkWCyUWDRgVFx4YAwAzFjAELCUPNScjDQ4hDB54Gwg4K2g3BmMBKjkwGggiFAo0Iwp6BBQeDxYwBz4VKCIzeDQmJjYeXTUmHCZpcygrAQt3NAFrBjsmGhtWJz8uUiR3CjorPy4NJXUuOjYIBDoMDGM4MwxxNiMNGg4SES01GHA1O3EIOSo7LQUXHnEeOgIjPXENLjQSfn4OVSkSAgcFBQIxDQUuajUPOj0MFwwcZhMnVzQOCQMDAWBWZBUPPx4oBAA5YA5qBwcrEwQ+IjppEz47Ji4CE2YNKTEzAUcjBgAoFFwyKHwbCz8pARUrDgIIMgg1H2MXGTUBFx0XAgMdEj0HOQ4MIionOyE2cUcxHAA7Iw0sNTkBDUU9GRsbPgkzOBwNKD9hHBdVJipxVTYRAgMmGAIVKxc2JREoNxgtMysDHggNExYWBh8FHwUfBQ8/KQYONiUrLjkfIwpxHDgYCTw1MDEMMBU2JRErK2crDzZdCy94UjAOC00MMgFCKTJxZw8mdgoSCzQMcAtzDC8hMBw7CHJ/GjQ+Cw4aDAVyMTMwEi8gHhUfNB8sDi4hWTQ0GDdJdSEVNggXAhY7Knd3MQ4KGhoZDm11DysqLxI8NXYZCXMDMngaMQg5PSsYKjYxJRJzdx8jOzQlIwklEwgtDhEMdwskLAs3Izg7LQscJi4IeyE3GiAbDAYrHzEzEjcxKicAdSteCTMqJHsUMSEXMT0kJD4Ga3V2Kk4rMSUZHS8qMAsqHTsEPR8RXzArXzc2OgYQOy4oPXc1AQM+DhpuMDFRFTMrBn8pCQkCdCE/MDILKG8uGllRNRlGRy0NGjsyFGoTKSUsOiwkAi8sNRJUNgQ0czEuFgUNMShjBAsBDDErbywzKBoKKzkeOncPDR42HCskNGg7BjE
MVgAvOyApLQ5WPgAVHiM+Jz8eOA8BOSI7Xwo4JGIJNjYdCz0MFmAuPhEbLzc3VjUQAGwoHjATcSAGdwUVCjIqMDA1OyQNUB5gGRw6UwpkNS0eECoqbCt2KzQEdD1jBzEZOxQdIjBoMxVqCyoEBToSDB5xPz44LA9MCDAKMAZhLgZZACwMKAYDPWgHODIGHiwMIDUpZ2YEMA04By8INQl3ClQLLC8wCDIIXG8/PSARMDYQLxQyeh8qFTg7MhhUDzkLKwNzDT8RPQ84JC0dDTAqGDA7KxkoKDAcPzh1KQo9LzkeN3YMIxc4HzsBNxorAj0jQX90CCMlPQ4FMTYPfDgwDA0sMyoJHyw6EigMCwULUBsDcnsAdQUAKRAMFBIqLQwCGCkLLmoOJQIEOSU/JQ0JFQgmDx02LwgrIjMLHQQ9DCw+cgoRJREWZAQkCyoyNgskJip0JDg5cy1BXXIzJAl3GCQCdggwZXEbBmcPNAwwCAV9fAkGDDUUBhBmKTgyKAo0KRklcRc/IxY5KQ8SACIKEgg4FVUuDx0FUVoiK3IuEiQEGQkkYToJDhcPJhVTfA8zMiMhFgxnAystCycgLTweB1A0GAMuACIBVEUKHSYiCR0UJA0ENQsRBwUPCgEpMCcvGyUKdxcvH3U5OAwRegMnCiE1IxYiOgsGEGoOAhg/DxJ9IggHCzESCgMsJgJ9awodFDksDRAyCyA1NwodDCwJOFcWCw0yNwokfTUKLwt3IwolIwwocTcbRRAeCwoMHiUZOWkeCRclHihWMyVVcTcfVQEkJjAyMyReOT0jEFwMC1UPPyMwATQnO1oxHz8DNSIoAScYMBMtDi8iFgwgHwwKMAxnDjsXDQooCx4YHSY4JQYYPgQ0Cz0PVkQEEQYqKCIWPTELLBsxElgUMBcENhMKPQQRbyQVRhJdREdUW0tUYB4MX2BjeAU8bxEfZUVYW1VHTF5OSQV/f1xBMU5Jamd7QX9fbWd4H3p1ZhNuYmRFVHRyZHRnBltCCnxGV1YxeEQcDUp3ZlJAFFhafWEKFUlQQ25cOW9iHm90Yk5teXpaSGdhXHsBYStPTR1fdG5wHUIAZ0ZuZWVTeFQVWWliaFxSGFRQOARhQlRVQFVpBmBObEZmAUlKdU9gW0VFbHJkXW0Ffko6cmVTfEx3CXdvV1x+eWMDE2h1IXlJZ0J1VkNKe1cGBnZkcE1gdFJbbXdsWntMECo=",
|
266 |
+
"gAAAAACWzMwMzIsIlRodSBKdWwgMTEgMjAyNCAwMzoxMDo0NiBHTVQrMDgwMCAo5Lit5Zu95qCH5YeG5pe26Ze0KSIsNDI5NDcwNTE1MiwxLCJNb3ppbGxhLzUuMCAoV2luZG93cyBOVCAxMC4wOyBXaW42NDsgeDY0KSBBcHBsZVdlYktpdC81MzcuMzYgKEtIVE1MLCBsaWtlIEdlY2tvKSBDaHJvbWUvMTI2LjAuMC4wIFNhZmFyaS81MzcuMzYgRWRnLzEyNi4wLjAuMCIsImh0dHBzOi8vY2RuLm9haXN0YXRpYy5jb20vX25leHQvc3RhdGljL2NodW5rcy9wYWdlcy9fYXBwLWMwOWZmNWY0MjQwMjcwZjguanMiLCJjL1pGWGkxeTNpMnpaS0EzSVQwNzRzMy9fIiwiemgtQ04iLCJ6aC1DTixlbixlbi1HQixlbi1VUyIsMTM1LCJ3ZWJraXRUZW1wb3JhcnlTdG9yYWdl4oiSW29iamVjdCBEZXByZWNhdGVkU3RvcmFnZVF1b3RhXSIsIl9yZWFjdExpc3RlbmluZ3NxZjF0ejFzNmsiLCJmZXRjaCIsMzY1NCwiNWU1NDUzNzItMzcyNy00ZDAyLTkwMDYtMzMwMDRjMWJmYTQ2Il0="
|
267 |
+
)
|
268 |
+
print(result)
|
chatgpt/wssClient.py
ADDED
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
import time
|
3 |
+
|
4 |
+
from utils.Logger import logger
|
5 |
+
import utils.globals as globals
|
6 |
+
|
7 |
+
|
8 |
+
def save_wss_map(wss_map):
    """Persist the token -> wss mapping to disk as pretty-printed JSON."""
    with open(globals.WSS_MAP_FILE, "w") as handle:
        json.dump(wss_map, handle, indent=4)
|
11 |
+
|
12 |
+
|
13 |
+
async def token2wss(token):
    """Resolve a cached wss_url for *token*.

    Returns a ``(wss_mode, wss_url)`` pair.  Unknown tokens, empty tokens,
    or entries with wss disabled yield ``(False, None)``.  Cache entries
    older than one hour yield ``(wss_mode, None)`` so the caller refreshes.
    """
    if not token:
        return False, None
    entry = globals.wss_map.get(token)
    if entry is None:
        return False, None
    wss_mode = entry["wss_mode"]
    if not wss_mode:
        return False, None
    # Entries are considered fresh for one hour after they were recorded.
    age = int(time.time()) - entry.get("timestamp", 0)
    if age < 60 * 60:
        logger.info("token -> wss_url from cache")
        return wss_mode, entry["wss_url"]
    logger.info("token -> wss_url expired")
    return wss_mode, None
|
29 |
+
|
30 |
+
|
31 |
+
async def set_wss(token, wss_mode, wss_url=None):
    """Record the wss settings for *token* and persist the map.

    Empty tokens are a no-op.  Always returns True.
    """
    if not token:
        return True
    entry = {"timestamp": int(time.time()), "wss_url": wss_url, "wss_mode": wss_mode}
    globals.wss_map[token] = entry
    save_wss_map(globals.wss_map)
    return True
|
docker-compose-warp.yml
ADDED
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
services:
  # Cloudflare WARP sidecar providing an outbound SOCKS5 proxy for chat2api.
  warp:
    image: caomingjun/warp
    container_name: warp
    restart: always
    devices:
      - /dev/net/tun:/dev/net/tun
    environment:
      - WARP_SLEEP=5
    cap_add:
      - MKNOD
      - AUDIT_WRITE
      - NET_ADMIN
    sysctls:
      - net.ipv6.conf.all.disable_ipv6=0
      - net.ipv4.conf.all.src_valid_mark=1
    volumes:
      - ./warpdata:/var/lib/cloudflare-warp
    networks:
      - internal_network # internal network only; no ports exposed to the outside
    healthcheck:
      test: ["CMD", "curl", "-f", "-s", "https://www.google.com"] # silently request Google; a 2xx status means healthy
      interval: 30s # check every 30 seconds
      timeout: 10s # each request times out after 10 seconds
      retries: 3 # marked unhealthy after 3 consecutive failures
      start_period: 5s # wait 5 seconds after container start before the first check

  # Main service: translates ChatGPT web access into an API, routed through warp.
  chat2api:
    image: lanqian528/chat2api:latest
    container_name: chat2api
    restart: unless-stopped
    ports:
      - '5005:5005' # expose the chat2api service for external access
    environment:
      - TZ=Asia/Shanghai
      - AUTHORIZATION=sk-xxx
      - PROXY_URL=socks5://warp:1080 # point PROXY_URL at the warp container's proxy address
    depends_on:
      warp:
        condition: service_healthy # chat2api starts only after warp's healthcheck passes
    networks:
      - internal_network # chat2api and warp share the same internal network
    volumes:
      - ./data:/app/data # persist application data that must survive restarts

  # Watchtower keeps the chat2api image up to date automatically.
  watchtower:
    image: containrrr/watchtower
    container_name: watchtower
    restart: unless-stopped
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    command: --cleanup --interval 300 chat2api

networks:
  internal_network:
    driver: bridge # define a bridge network
|
docker-compose.yml
ADDED
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
version: '3'

services:
  # Main API service translating ChatGPT web access into an OpenAI-style API.
  chat2api:
    image: lanqian528/chat2api:latest
    container_name: chat2api
    restart: unless-stopped
    ports:
      - '5005:5005'
    volumes:
      # Persisted application state (token maps, conversation maps).
      - ./data:/app/data
    environment:
      - TZ=Asia/Shanghai
      # NOTE(review): placeholder key — replace before deploying.
      - AUTHORIZATION=sk-xxx

  # Watchtower auto-updates the chat2api container every 300 seconds.
  watchtower:
    image: containrrr/watchtower
    container_name: watchtower
    restart: unless-stopped
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    command: --cleanup --interval 300 chat2api
|
gateway/admin.py
ADDED
File without changes
|
gateway/backend.py
ADDED
@@ -0,0 +1,381 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
import random
|
3 |
+
import re
|
4 |
+
import time
|
5 |
+
import uuid
|
6 |
+
|
7 |
+
from fastapi import Request, HTTPException
|
8 |
+
from fastapi.responses import RedirectResponse, StreamingResponse, Response
|
9 |
+
from starlette.background import BackgroundTask
|
10 |
+
from starlette.concurrency import run_in_threadpool
|
11 |
+
|
12 |
+
import utils.globals as globals
|
13 |
+
from app import app
|
14 |
+
from chatgpt.authorization import verify_token
|
15 |
+
from chatgpt.fp import get_fp
|
16 |
+
from chatgpt.proofofWork import get_answer_token, get_config, get_requirements_token
|
17 |
+
from gateway.chatgpt import chatgpt_html
|
18 |
+
from gateway.reverseProxy import chatgpt_reverse_proxy, content_generator, get_real_req_token, headers_reject_list
|
19 |
+
from utils.Client import Client
|
20 |
+
from utils.Logger import logger
|
21 |
+
from utils.configs import x_sign, turnstile_solver_url, chatgpt_base_url_list, no_sentinel, sentinel_proxy_url_list, \
|
22 |
+
force_no_history
|
23 |
+
|
24 |
+
banned_paths = [
|
25 |
+
"backend-api/accounts/logout_all",
|
26 |
+
"backend-api/accounts/deactivate",
|
27 |
+
"backend-api/payments",
|
28 |
+
"backend-api/subscriptions",
|
29 |
+
"backend-api/user_system_messages",
|
30 |
+
"backend-api/memories",
|
31 |
+
"backend-api/settings/clear_account_user_memory",
|
32 |
+
"backend-api/conversations/[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}",
|
33 |
+
"backend-api/accounts/mfa_info",
|
34 |
+
"backend-api/accounts/[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}/invites",
|
35 |
+
"admin",
|
36 |
+
]
|
37 |
+
redirect_paths = ["auth/logout"]
|
38 |
+
chatgpt_paths = ["c/[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}"]
|
39 |
+
|
40 |
+
|
41 |
+
@app.get("/backend-api/accounts/check/v4-2023-04-27")
async def check_account(request: Request):
    """Proxy the account-check endpoint, masking real account ids for seed tokens.

    Real tokens (45-char access tokens or JWTs) receive the upstream response
    unchanged.  For seed tokens the payload is rewritten: the real
    account_user_id is cached in seed_map and replaced with a generic
    "user-chatgpt__<account_id>" value before being returned.
    """
    # FIX: default to "" so a missing Authorization header cannot raise
    # AttributeError (every other handler in this module uses the same default).
    token = request.headers.get("Authorization", "").replace("Bearer ", "")
    check_account_response = await chatgpt_reverse_proxy(request, "backend-api/accounts/check/v4-2023-04-27")
    if len(token) == 45 or token.startswith("eyJhbGciOi"):
        return check_account_response
    else:
        check_account_str = check_account_response.body.decode('utf-8')
        check_account_info = json.loads(check_account_str)
        for key in check_account_info.get("accounts", {}).keys():
            account_id = check_account_info["accounts"][key]["account"]["account_id"]
            # Remember the real user id before masking it in the response.
            globals.seed_map[token]["user_id"] = \
                check_account_info["accounts"][key]["account"]["account_user_id"].split("__")[0]
            check_account_info["accounts"][key]["account"]["account_user_id"] = f"user-chatgpt__{account_id}"
        with open(globals.SEED_MAP_FILE, "w", encoding="utf-8") as f:
            json.dump(globals.seed_map, f, indent=4)
        return check_account_info
|
58 |
+
|
59 |
+
|
60 |
+
@app.get("/backend-api/gizmos/bootstrap")
async def get_gizmos_bootstrap(request: Request):
    """Forward the gizmo bootstrap call for real tokens; seed tokens get none."""
    token = request.headers.get("Authorization", "").replace("Bearer ", "")
    is_real_token = len(token) == 45 or token.startswith("eyJhbGciOi")
    if not is_real_token:
        return {"gizmos": []}
    return await chatgpt_reverse_proxy(request, "backend-api/gizmos/bootstrap")
|
67 |
+
|
68 |
+
|
69 |
+
@app.get("/backend-api/gizmos/pinned")
async def get_gizmos_pinned(request: Request):
    """Forward pinned-gizmo lookups for real tokens; seed tokens get an empty page."""
    token = request.headers.get("Authorization", "").replace("Bearer ", "")
    is_real_token = len(token) == 45 or token.startswith("eyJhbGciOi")
    if not is_real_token:
        return {"items": [], "cursor": None}
    return await chatgpt_reverse_proxy(request, "backend-api/gizmos/pinned")
|
76 |
+
|
77 |
+
|
78 |
+
@app.get("/public-api/gizmos/discovery/recent")
async def get_gizmos_discovery_recent(request: Request):
    """Forward recent-gizmo discovery for real tokens; serve an empty section otherwise."""
    token = request.headers.get("Authorization", "").replace("Bearer ", "")
    if len(token) == 45 or token.startswith("eyJhbGciOi"):
        return await chatgpt_reverse_proxy(request, "public-api/gizmos/discovery/recent")
    empty_section = {
        "info": {
            "id": "recent",
            "title": "Recently Used",
        },
        "list": {
            "items": [],
            "cursor": None
        }
    }
    return empty_section
|
94 |
+
|
95 |
+
|
96 |
+
@app.api_route("/backend-api/conversations", methods=["GET", "PATCH"])
async def get_conversations(request: Request):
    """List locally cached conversations for seed tokens.

    Real tokens are proxied upstream.  For seed tokens, GET pages through the
    conversations recorded in seed_map/conversation_map; PATCH has no local
    meaning and is rejected with 403.
    """
    token = request.headers.get("Authorization", "").replace("Bearer ", "")
    if len(token) == 45 or token.startswith("eyJhbGciOi"):
        return await chatgpt_reverse_proxy(request, "backend-api/conversations")
    if request.method == "GET":
        limit = int(request.query_params.get("limit", 28))
        offset = int(request.query_params.get("offset", 0))
        is_archived = request.query_params.get("is_archived", None)
        items = []
        for conversation_id in globals.seed_map.get(token, {}).get("conversations", []):
            conversation = globals.conversation_map.get(conversation_id, None)
            if not conversation:
                continue
            # "is_archived=true" selects archived conversations; anything else selects active ones.
            if is_archived == "true":
                if conversation.get("is_archived", False):
                    items.append(conversation)
            elif not conversation.get("is_archived", False):
                items.append(conversation)
        # FIX: report the total number of matching conversations, not the page
        # size — computing it after slicing broke pagination for clients.
        total = len(items)
        items = items[offset:offset + limit]
        conversations = {
            "items": items,
            "total": total,
            "limit": limit,
            "offset": offset,
            "has_missing_conversations": False
        }
        return Response(content=json.dumps(conversations, indent=4), media_type="application/json")
    else:
        raise HTTPException(status_code=403, detail="Forbidden")
|
126 |
+
|
127 |
+
|
128 |
+
@app.get("/backend-api/conversation/{conversation_id}")
async def update_conversation(request: Request, conversation_id: str):
    """Fetch conversation details upstream and sync the local cache for seed tokens.

    The upstream response is always returned unchanged; for seed tokens the
    locally cached conversation entry is refreshed from it first.
    """
    token = request.headers.get("Authorization", "").replace("Bearer ", "")
    conversation_details_response = await chatgpt_reverse_proxy(request,
                                                                f"backend-api/conversation/{conversation_id}")
    if len(token) == 45 or token.startswith("eyJhbGciOi"):
        return conversation_details_response
    conversation_details_str = conversation_details_response.body.decode('utf-8')
    conversation_details = json.loads(conversation_details_str)
    # FIX: use .get() so an unknown seed token cannot raise KeyError
    # (consistent with get_conversations above).
    if conversation_id in globals.seed_map.get(token, {}).get("conversations", []) \
            and conversation_id in globals.conversation_map:
        cached = globals.conversation_map[conversation_id]
        cached["title"] = conversation_details.get("title", None)
        cached["is_archived"] = conversation_details.get("is_archived", False)
        cached["conversation_template_id"] = conversation_details.get("conversation_template_id", None)
        cached["gizmo_id"] = conversation_details.get("gizmo_id", None)
        cached["async_status"] = conversation_details.get("async_status", None)
        with open(globals.CONVERSATION_MAP_FILE, "w", encoding="utf-8") as f:
            json.dump(globals.conversation_map, f, indent=4)
    return conversation_details_response
|
151 |
+
|
152 |
+
|
153 |
+
@app.patch("/backend-api/conversation/{conversation_id}")
async def patch_conversation(request: Request, conversation_id: str):
    """Apply conversation patches locally for seed tokens after proxying upstream.

    Setting is_visible=false deletes the conversation from the local cache;
    any other patch is merged into the cached entry.
    """
    token = request.headers.get("Authorization", "").replace("Bearer ", "")
    patch_response = (await chatgpt_reverse_proxy(request, f"backend-api/conversation/{conversation_id}"))
    if len(token) == 45 or token.startswith("eyJhbGciOi"):
        return patch_response
    data = await request.json()
    # FIX: use .get() so an unknown seed token cannot raise KeyError
    # (consistent with get_conversations above).
    if conversation_id in globals.seed_map.get(token, {}).get("conversations", []) \
            and conversation_id in globals.conversation_map:
        if not data.get("is_visible", True):
            # Hiding a conversation removes it from both local maps.
            globals.conversation_map.pop(conversation_id)
            globals.seed_map[token]["conversations"].remove(conversation_id)
            with open(globals.SEED_MAP_FILE, "w", encoding="utf-8") as f:
                json.dump(globals.seed_map, f, indent=4)
        else:
            globals.conversation_map[conversation_id].update(data)
        with open(globals.CONVERSATION_MAP_FILE, "w", encoding="utf-8") as f:
            json.dump(globals.conversation_map, f, indent=4)
    return patch_response
|
173 |
+
|
174 |
+
|
175 |
+
@app.get("/backend-api/me")
async def get_me(request: Request):
    """Proxy /me for real tokens; serve a canned anonymous profile otherwise."""
    token = request.headers.get("Authorization", "").replace("Bearer ", "")
    if len(token) == 45 or token.startswith("eyJhbGciOi"):
        return await chatgpt_reverse_proxy(request, "backend-api/me")
    # Static personal-organization record embedded in the fake profile.
    personal_org = {
        "object": "organization",
        "id": "org-chatgpt",
        "created": 1715641300,
        "title": "Personal",
        "name": "user-chatgpt",
        "description": "Personal org for [email protected]",
        "personal": True,
        "settings": {
            "threads_ui_visibility": "NONE",
            "usage_dashboard_visibility": "ANY_ROLE",
            "disable_user_api_keys": False
        },
        "parent_org_id": None,
        "is_default": True,
        "role": "owner",
        "is_scale_tier_authorized_purchaser": None,
        "is_scim_managed": False,
        "projects": {
            "object": "list",
            "data": []
        },
        "groups": [],
        "geography": None
    }
    me = {
        "object": "user",
        "id": "org-chatgpt",
        "email": "[email protected]",
        "name": "ChatGPT",
        "picture": "https://cdn.auth0.com/avatars/ai.png",
        "created": int(time.time()),
        "phone_number": None,
        "mfa_flag_enabled": False,
        "amr": [],
        "groups": [],
        "orgs": {
            "object": "list",
            "data": [personal_org]
        },
        "has_payg_project_spend_limit": True
    }
    return Response(content=json.dumps(me, indent=4), media_type="application/json")
|
225 |
+
|
226 |
+
|
227 |
+
@app.post("/backend-api/edge")
async def edge():
    """Swallow browser telemetry posts with an empty 204 response."""
    return Response(status_code=204)
|
230 |
+
|
231 |
+
|
232 |
+
if no_sentinel:
    @app.post("/backend-api/sentinel/chat-requirements")
    async def sentinel_chat_conversations():
        """Short-circuit sentinel: report that no arkose/PoW/turnstile is required."""
        return {
            "arkose": {
                "dx": None,
                "required": False
            },
            "persona": "chatgpt-paid",
            "proofofwork": {
                "difficulty": None,
                "required": False,
                "seed": None
            },
            # A fresh random token keeps each response unique, as upstream would.
            "token": str(uuid.uuid4()),
            "turnstile": {
                "dx": None,
                "required": False
            }
        }
|
252 |
+
|
253 |
+
|
254 |
+
@app.post("/backend-api/conversation")
|
255 |
+
async def chat_conversations(request: Request):
|
256 |
+
token = request.headers.get("Authorization", "").replace("Bearer ", "")
|
257 |
+
req_token = await get_real_req_token(token)
|
258 |
+
access_token = await verify_token(req_token)
|
259 |
+
fp = get_fp(req_token).copy()
|
260 |
+
proxy_url = fp.pop("proxy_url", None)
|
261 |
+
impersonate = fp.pop("impersonate", "safari15_3")
|
262 |
+
user_agent = fp.get("user-agent",
|
263 |
+
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 Edg/130.0.0.0")
|
264 |
+
|
265 |
+
host_url = random.choice(chatgpt_base_url_list) if chatgpt_base_url_list else "https://chatgpt.com"
|
266 |
+
proof_token = None
|
267 |
+
turnstile_token = None
|
268 |
+
|
269 |
+
headers = {
|
270 |
+
key: value for key, value in request.headers.items()
|
271 |
+
if (key.lower() not in ["host", "origin", "referer", "priority", "sec-ch-ua-platform", "sec-ch-ua",
|
272 |
+
"sec-ch-ua-mobile", "oai-device-id"] and key.lower() not in headers_reject_list)
|
273 |
+
}
|
274 |
+
headers.update(fp)
|
275 |
+
headers.update({"authorization": f"Bearer {access_token}"})
|
276 |
+
|
277 |
+
client = Client(proxy=proxy_url, impersonate=impersonate)
|
278 |
+
if sentinel_proxy_url_list:
|
279 |
+
clients = Client(proxy=random.choice(sentinel_proxy_url_list), impersonate=impersonate)
|
280 |
+
else:
|
281 |
+
clients = client
|
282 |
+
|
283 |
+
config = get_config(user_agent)
|
284 |
+
p = get_requirements_token(config)
|
285 |
+
data = {'p': p}
|
286 |
+
r = await clients.post(f'{host_url}/backend-api/sentinel/chat-requirements', headers=headers, json=data,
|
287 |
+
timeout=10)
|
288 |
+
resp = r.json()
|
289 |
+
turnstile = resp.get('turnstile', {})
|
290 |
+
turnstile_required = turnstile.get('required')
|
291 |
+
if turnstile_required:
|
292 |
+
turnstile_dx = turnstile.get("dx")
|
293 |
+
try:
|
294 |
+
if turnstile_solver_url:
|
295 |
+
res = await client.post(turnstile_solver_url,
|
296 |
+
json={"url": "https://chatgpt.com", "p": p, "dx": turnstile_dx})
|
297 |
+
turnstile_token = res.json().get("t")
|
298 |
+
except Exception as e:
|
299 |
+
logger.info(f"Turnstile ignored: {e}")
|
300 |
+
|
301 |
+
proofofwork = resp.get('proofofwork', {})
|
302 |
+
proofofwork_required = proofofwork.get('required')
|
303 |
+
if proofofwork_required:
|
304 |
+
proofofwork_diff = proofofwork.get("difficulty")
|
305 |
+
proofofwork_seed = proofofwork.get("seed")
|
306 |
+
proof_token, solved = await run_in_threadpool(
|
307 |
+
get_answer_token, proofofwork_seed, proofofwork_diff, config
|
308 |
+
)
|
309 |
+
if not solved:
|
310 |
+
raise HTTPException(status_code=403, detail="Failed to solve proof of work")
|
311 |
+
chat_token = resp.get('token')
|
312 |
+
headers.update({
|
313 |
+
"openai-sentinel-chat-requirements-token": chat_token,
|
314 |
+
"openai-sentinel-proof-token": proof_token,
|
315 |
+
"openai-sentinel-turnstile-token": turnstile_token,
|
316 |
+
})
|
317 |
+
|
318 |
+
params = dict(request.query_params)
|
319 |
+
data = await request.body()
|
320 |
+
request_cookies = dict(request.cookies)
|
321 |
+
|
322 |
+
async def c_close(client, clients):
|
323 |
+
if client:
|
324 |
+
await client.close()
|
325 |
+
del client
|
326 |
+
if clients:
|
327 |
+
await clients.close()
|
328 |
+
del clients
|
329 |
+
|
330 |
+
history = True
|
331 |
+
try:
|
332 |
+
req_json = json.loads(data)
|
333 |
+
history = not req_json.get("history_and_training_disabled", False)
|
334 |
+
except Exception:
|
335 |
+
pass
|
336 |
+
if force_no_history:
|
337 |
+
history = False
|
338 |
+
req_json = json.loads(data)
|
339 |
+
req_json["history_and_training_disabled"] = True
|
340 |
+
data = json.dumps(req_json).encode("utf-8")
|
341 |
+
|
342 |
+
background = BackgroundTask(c_close, client, clients)
|
343 |
+
r = await client.post_stream(f"{host_url}/backend-api/conversation", params=params, headers=headers,
|
344 |
+
cookies=request_cookies, data=data, stream=True, allow_redirects=False)
|
345 |
+
rheaders = r.headers
|
346 |
+
logger.info(f"Request token: {req_token}")
|
347 |
+
logger.info(f"Request proxy: {proxy_url}")
|
348 |
+
logger.info(f"Request UA: {user_agent}")
|
349 |
+
logger.info(f"Request impersonate: {impersonate}")
|
350 |
+
if x_sign:
|
351 |
+
rheaders.update({"x-sign": x_sign})
|
352 |
+
if 'stream' in rheaders.get("content-type", ""):
|
353 |
+
conv_key = r.cookies.get("conv_key", "")
|
354 |
+
response = StreamingResponse(content_generator(r, token, history), headers=rheaders,
|
355 |
+
media_type=r.headers.get("content-type", ""), background=background)
|
356 |
+
response.set_cookie("conv_key", value=conv_key)
|
357 |
+
return response
|
358 |
+
else:
|
359 |
+
return Response(content=(await r.atext()), headers=rheaders, media_type=rheaders.get("content-type"),
|
360 |
+
status_code=r.status_code, background=background)
|
361 |
+
|
362 |
+
|
363 |
+
@app.api_route("/{path:path}", methods=["GET", "POST", "PUT", "DELETE", "OPTIONS", "HEAD", "PATCH", "TRACE"])
async def reverse_proxy(request: Request, path: str):
    """Catch-all route: block banned paths for seed tokens, serve the chat UI
    for conversation URLs, redirect logout, and proxy everything else."""
    token = request.headers.get("Authorization", "").replace("Bearer ", "")
    is_real_token = len(token) == 45 or token.startswith("eyJhbGciOi")
    if not is_real_token:
        # Seed tokens may not touch account-destructive or billing endpoints.
        for banned_path in banned_paths:
            if re.match(banned_path, path):
                raise HTTPException(status_code=403, detail="Forbidden")

    for chatgpt_path in chatgpt_paths:
        if re.match(chatgpt_path, path):
            return await chatgpt_html(request)

    for redirect_path in redirect_paths:
        if re.match(redirect_path, path):
            return RedirectResponse(url=f"{str(request.base_url)}login", status_code=302)

    return await chatgpt_reverse_proxy(request, path)
|
gateway/chatgpt.py
ADDED
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
from urllib.parse import quote
|
3 |
+
|
4 |
+
from fastapi import Request
|
5 |
+
from fastapi.responses import HTMLResponse
|
6 |
+
|
7 |
+
from app import app, templates
|
8 |
+
from gateway.login import login_html
|
9 |
+
from utils.kv_utils import set_value_for_key
|
10 |
+
|
11 |
+
with open("templates/chatgpt_context.json", "r", encoding="utf-8") as f:
|
12 |
+
chatgpt_context = json.load(f)
|
13 |
+
|
14 |
+
|
15 |
+
@app.get("/", response_class=HTMLResponse)
async def chatgpt_html(request: Request):
    """Render the ChatGPT shell page, falling back to login when no token is set."""
    token = request.query_params.get("token") or request.cookies.get("token")
    if not token:
        return await login_html(request)

    # Seed tokens get URL-quoted before being embedded in the page context.
    if len(token) != 45 and not token.startswith("eyJhbGciOi"):
        token = quote(token)

    user_remix_context = chatgpt_context.copy()
    set_value_for_key(user_remix_context, "user", {"id": "user-chatgpt"})
    set_value_for_key(user_remix_context, "accessToken", token)

    response = templates.TemplateResponse("chatgpt.html", {"request": request, "remix_context": user_remix_context})
    response.set_cookie("token", value=token, expires="Thu, 01 Jan 2099 00:00:00 GMT")
    return response
|
gateway/gpts.py
ADDED
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
|
3 |
+
from fastapi import Request
|
4 |
+
from fastapi.responses import Response
|
5 |
+
|
6 |
+
from app import app
|
7 |
+
from gateway.chatgpt import chatgpt_html
|
8 |
+
|
9 |
+
with open("templates/gpts_context.json", "r", encoding="utf-8") as f:
|
10 |
+
gpts_context = json.load(f)
|
11 |
+
|
12 |
+
|
13 |
+
@app.get("/gpts")
async def get_gpts():
    """Minimal GPT-store stub response."""
    return {"kind": "store"}
|
16 |
+
|
17 |
+
|
18 |
+
@app.get("/g/g-{gizmo_id}")
async def get_gizmo_json(request: Request, gizmo_id: str):
    """Serve canned gizmo context for remix data requests; HTML shell otherwise."""
    if request.query_params.get("_data") == "routes/g.$gizmoId._index":
        payload = json.dumps(gpts_context, indent=4)
        return Response(content=payload, media_type="application/json")
    return await chatgpt_html(request)
|
gateway/login.py
ADDED
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from fastapi import Request
|
2 |
+
from fastapi.responses import HTMLResponse
|
3 |
+
|
4 |
+
from app import app, templates
|
5 |
+
|
6 |
+
|
7 |
+
@app.get("/login", response_class=HTMLResponse)
async def login_html(request: Request):
    """Render the token-entry login page."""
    return templates.TemplateResponse("login.html", {"request": request})
|
gateway/reverseProxy.py
ADDED
@@ -0,0 +1,293 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
import random
|
3 |
+
import time
|
4 |
+
import uuid
|
5 |
+
from datetime import datetime, timezone
|
6 |
+
|
7 |
+
from fastapi import Request, HTTPException
|
8 |
+
from fastapi.responses import StreamingResponse, Response
|
9 |
+
from starlette.background import BackgroundTask
|
10 |
+
|
11 |
+
import utils.globals as globals
|
12 |
+
from chatgpt.authorization import verify_token, get_req_token
|
13 |
+
from chatgpt.fp import get_fp
|
14 |
+
from utils.Client import Client
|
15 |
+
from utils.Logger import logger
|
16 |
+
from utils.configs import chatgpt_base_url_list, sentinel_proxy_url_list, force_no_history, file_host, voice_host
|
17 |
+
|
18 |
+
|
19 |
+
def generate_current_time():
    """Current UTC time as an ISO-8601 string with microseconds and a 'Z' suffix."""
    now_utc = datetime.now(timezone.utc)
    return now_utc.isoformat(timespec='microseconds').replace('+00:00', 'Z')
|
23 |
+
|
24 |
+
|
25 |
+
# Headers stripped before forwarding a request upstream: proxy/CDN metadata
# (cf-*, x-forwarded-*), hop-by-hop fields, and framework prefetch markers.
# FIX: the original list contained ~11 duplicate entries; membership semantics
# are unchanged after de-duplication (first-occurrence order preserved).
headers_reject_list = [
    "x-real-ip",
    "x-forwarded-for",
    "x-forwarded-proto",
    "x-forwarded-port",
    "x-forwarded-host",
    "x-forwarded-server",
    "cf-warp-tag-id",
    "cf-visitor",
    "cf-ray",
    "cf-connecting-ip",
    "cf-ipcountry",
    "cdn-loop",
    "remote-host",
    "x-frame-options",
    "x-xss-protection",
    "x-content-type-options",
    "content-security-policy",
    "host",
    "cookie",
    "connection",
    "content-length",
    "content-encoding",
    "x-middleware-prefetch",
    "x-nextjs-data",
    "purpose",
    "x-forwarded-uri",
    "x-forwarded-path",
    "x-forwarded-method",
    "x-forwarded-protocol",
    "x-forwarded-scheme",
    "cf-request-id",
    "cf-worker",
    "cf-access-client-id",
    "cf-access-client-device-type",
    "cf-access-client-device-model",
    "cf-access-client-device-name",
    "cf-access-client-device-brand",
]
|
75 |
+
|
76 |
+
|
77 |
+
async def get_real_req_token(token):
    """Map an incoming token to a usable request token.

    First tries the direct seed lookup; if the result is not a real access
    token (45 chars) or JWT, retries using the token as the seed key.
    """
    req_token = get_req_token(token)
    if len(req_token) == 45 or req_token.startswith("eyJhbGciOi"):
        return req_token
    return get_req_token(None, token)
|
84 |
+
|
85 |
+
|
86 |
+
def save_conversation(token, conversation_id, title=None):
    """Upsert a conversation record and move it to the front of the token's list.

    Updates conversation_map (creating the entry if needed, refreshing its
    update_time, and setting the title when given), promotes the conversation
    to the head of the token's conversation list, then persists both maps.
    """
    now = generate_current_time()
    conversation = globals.conversation_map.get(conversation_id)
    if conversation is None:
        globals.conversation_map[conversation_id] = {
            "id": conversation_id,
            "title": title,
            "update_time": now
        }
    else:
        conversation["update_time"] = now
        if title:
            conversation["title"] = title
    # Most-recently-used ordering: the conversation always ends up first.
    token_conversations = globals.seed_map[token]["conversations"]
    if conversation_id in token_conversations:
        token_conversations.remove(conversation_id)
    token_conversations.insert(0, conversation_id)
    with open(globals.CONVERSATION_MAP_FILE, "w", encoding="utf-8") as f:
        json.dump(globals.conversation_map, f, indent=4)
    with open(globals.SEED_MAP_FILE, "w", encoding="utf-8") as f:
        json.dump(globals.seed_map, f, indent=4)
    if title:
        logger.info(f"Conversation ID: {conversation_id}, Title: {title}")
|
109 |
+
|
110 |
+
|
111 |
+
async def content_generator(r, token, history=True):
    """Yield upstream SSE chunks unchanged while sniffing conversation metadata.

    For seed tokens (neither 45-char access tokens nor JWTs) with history
    enabled, early data chunks are parsed to capture the conversation id and
    title so the conversation can be recorded locally.  Parsing is strictly
    best-effort: any failure is swallowed and the stream is forwarded as-is.
    """
    conversation_id = None
    title = None
    async for chunk in r.aiter_content():
        try:
            if history and (len(token) != 45 and not token.startswith("eyJhbGciOi")) and (not conversation_id or not title):
                chat_chunk = chunk.decode('utf-8')
                if chat_chunk.startswith("data: {"):
                    # Trim the chunk down to the first SSE event's JSON payload.
                    if "\n\nevent: delta" in chat_chunk:
                        index = chat_chunk.find("\n\nevent: delta")
                        chunk_data = chat_chunk[6:index]
                    elif "\n\ndata: {" in chat_chunk:
                        index = chat_chunk.find("\n\ndata: {")
                        chunk_data = chat_chunk[6:index]
                    else:
                        chunk_data = chat_chunk[6:]
                    # FIX: parse the payload once (the original called
                    # json.loads twice and had a dead `if "title" ...: pass`).
                    payload = json.loads(chunk_data.strip())
                    if conversation_id is None:
                        conversation_id = payload.get("conversation_id")
                        save_conversation(token, conversation_id)
                        title = globals.conversation_map[conversation_id].get("title")
                    if title is None:
                        title = payload.get("title")
                        if title:
                            save_conversation(token, conversation_id, title)
        except Exception:
            # Metadata sniffing must never break the stream.
            pass
        yield chunk
|
143 |
+
|
144 |
+
|
145 |
+
async def chatgpt_reverse_proxy(request: Request, path: str):
    """Forward an arbitrary gateway request to the appropriate upstream host.

    Rewrites host/origin/referer and per-token fingerprint headers, swaps the
    caller's token for a real access token, optionally forces history off, and
    translates the upstream response (redirect targets and absolute upstream
    URLs are rewritten back to this gateway's own host/scheme).

    Raises:
        HTTPException: transport or proxying failures surface as 500; any
            HTTPException raised while proxying propagates unchanged.
    """
    try:
        origin_host = request.url.netloc
        # Determine the externally visible scheme ("petrol"), preferring
        # reverse-proxy headers over the direct connection's scheme.
        if request.url.is_secure:
            petrol = "https"
        else:
            petrol = "http"
        if "x-forwarded-proto" in request.headers:
            petrol = request.headers["x-forwarded-proto"]
        if "cf-visitor" in request.headers:
            cf_visitor = json.loads(request.headers["cf-visitor"])
            petrol = cf_visitor.get("scheme", petrol)

        params = dict(request.query_params)
        request_cookies = dict(request.cookies)

        # Drop hop-identifying headers; correct values are re-derived below.
        headers = {
            key: value for key, value in request.headers.items()
            if (key.lower() not in ["host", "origin", "referer", "priority",
                                    "oai-device-id"] and key.lower() not in headers_reject_list)
        }

        # Route to the right upstream host based on the requested path.
        base_url = random.choice(chatgpt_base_url_list) if chatgpt_base_url_list else "https://chatgpt.com"
        if "assets/" in path:
            base_url = "https://cdn.oaistatic.com"
        if "file-" in path and "backend-api" not in path:
            base_url = "https://files.oaiusercontent.com"
        if "v1/" in path:
            base_url = "https://ab.chatgpt.com"

        token = request.cookies.get("token", "")
        req_token = await get_real_req_token(token)
        fp = get_fp(req_token).copy()
        proxy_url = fp.pop("proxy_url", None)
        impersonate = fp.pop("impersonate", "safari15_3")
        user_agent = fp.get("user-agent")
        headers.update(fp)

        headers.update({
            "accept-language": "en-US,en;q=0.9",
            "host": base_url.replace("https://", "").replace("http://", ""),
            "origin": base_url,
            "referer": f"{base_url}/"
        })
        if "v1/initialize" in path:
            headers.update({"user-agent": request.headers.get("user-agent")})
            if "statsig-api-key" not in headers:
                headers.update({
                    "statsig-sdk-type": "js-client",
                    "statsig-api-key": "client-tnE5GCU2F2cTxRiMbvTczMDT1jpwIigZHsZSdqiy4u",
                    "statsig-sdk-version": "5.1.0",
                    # NOTE(review): int header value relies on the HTTP client
                    # stringifying it -- confirm against curl_cffi behaviour.
                    "statsig-client-time": int(time.time() * 1000),
                })

        # Replace the caller's bearer token with the real upstream one.
        token = headers.get("authorization", "").replace("Bearer ", "")
        if token:
            req_token = await get_real_req_token(token)
            access_token = await verify_token(req_token)
            headers.update({"authorization": f"Bearer {access_token}"})

        data = await request.body()

        history = True
        if path.endswith("backend-api/conversation"):
            try:
                req_json = json.loads(data)
                history = not req_json.get("history_and_training_disabled", False)
            except Exception:
                pass
            if force_no_history:
                history = False
                req_json = json.loads(data)
                req_json["history_and_training_disabled"] = True
                data = json.dumps(req_json).encode("utf-8")

        # Sentinel (PoW) calls may go through a dedicated proxy pool.
        if sentinel_proxy_url_list and "backend-api/sentinel/chat-requirements" in path:
            client = Client(proxy=random.choice(sentinel_proxy_url_list))
        else:
            client = Client(proxy=proxy_url, impersonate=impersonate)
        try:
            background = BackgroundTask(client.close)
            r = await client.request(request.method, f"{base_url}/{path}", params=params, headers=headers,
                                     cookies=request_cookies, data=data, stream=True, allow_redirects=False)
            if r.status_code == 307 or r.status_code == 302 or r.status_code == 301:
                # Rewrite upstream redirects back to this gateway.
                return Response(status_code=307,
                                headers={"Location": r.headers.get("Location")
                                .replace("ab.chatgpt.com", origin_host)
                                .replace("chatgpt.com", origin_host)
                                .replace("cdn.oaistatic.com", origin_host)
                                .replace("https", petrol)}, background=background)
            elif 'stream' in r.headers.get("content-type", ""):
                logger.info(f"Request token: {req_token}")
                logger.info(f"Request proxy: {proxy_url}")
                logger.info(f"Request UA: {user_agent}")
                logger.info(f"Request impersonate: {impersonate}")
                conv_key = r.cookies.get("conv_key", "")
                response = StreamingResponse(content_generator(r, token, history), media_type=r.headers.get("content-type", ""),
                                             background=background)
                response.set_cookie("conv_key", value=conv_key)
                return response
            elif 'image' in r.headers.get("content-type", "") or "audio" in r.headers.get("content-type", "") or "video" in r.headers.get("content-type", ""):
                # Binary payloads are passed through untouched.
                rheaders = dict(r.headers)
                response = Response(content=await r.acontent(), headers=rheaders,
                                    status_code=r.status_code, background=background)
                return response
            else:
                if "/backend-api/conversation" in path or "/register-websocket" in path:
                    response = Response(content=(await r.acontent()), media_type=r.headers.get("content-type"),
                                        status_code=r.status_code, background=background)
                else:
                    # Text bodies get absolute upstream URLs rewritten to this
                    # gateway (and optional voice/file hosts when configured).
                    content = await r.atext()
                    if "public-api/" in path:
                        content = (content
                                   .replace("https://ab.chatgpt.com", f"{petrol}://{origin_host}")
                                   .replace("https://cdn.oaistatic.com", f"{petrol}://{origin_host}")
                                   .replace("webrtc.chatgpt.com", voice_host if voice_host else "webrtc.chatgpt.com")
                                   .replace("files.oaiusercontent.com", file_host if file_host else "files.oaiusercontent.com")
                                   .replace("chatgpt.com/ces", f"{origin_host}/ces")
                                   )
                    else:
                        content = (content
                                   .replace("https://ab.chatgpt.com", f"{petrol}://{origin_host}")
                                   .replace("https://cdn.oaistatic.com", f"{petrol}://{origin_host}")
                                   .replace("webrtc.chatgpt.com", voice_host if voice_host else "webrtc.chatgpt.com")
                                   .replace("files.oaiusercontent.com", file_host if file_host else "files.oaiusercontent.com")
                                   .replace("https://chatgpt.com", f"{petrol}://{origin_host}")
                                   .replace("chatgpt.com/ces", f"{origin_host}/ces")
                                   )
                    # Keep only a small allow-list of response headers.
                    rheaders = dict(r.headers)
                    content_type = rheaders.get("content-type", "")
                    cache_control = rheaders.get("cache-control", "")
                    expires = rheaders.get("expires", "")
                    content_disposition = rheaders.get("content-disposition", "")
                    rheaders = {
                        "cache-control": cache_control,
                        "content-type": content_type,
                        "expires": expires,
                        "content-disposition": content_disposition
                    }
                    response = Response(content=content, headers=rheaders,
                                        status_code=r.status_code, background=background)
                return response
        except Exception:
            await client.close()
            # Bug fix: the failure used to be swallowed here, making the
            # endpoint silently return None; re-raise so the handlers below
            # convert it into a proper HTTP error.
            raise
    except HTTPException as e:
        raise e
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
|
gateway/route.py
ADDED
File without changes
|
gateway/share.py
ADDED
@@ -0,0 +1,251 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
import random
|
3 |
+
import time
|
4 |
+
|
5 |
+
import jwt
|
6 |
+
from fastapi import Request, HTTPException, Security
|
7 |
+
from fastapi.responses import Response
|
8 |
+
from fastapi.security import HTTPAuthorizationCredentials
|
9 |
+
|
10 |
+
import utils.globals as globals
|
11 |
+
from app import app, security_scheme
|
12 |
+
from chatgpt.authorization import verify_token
|
13 |
+
from chatgpt.fp import get_fp
|
14 |
+
from gateway.reverseProxy import get_real_req_token
|
15 |
+
from utils.Client import Client
|
16 |
+
from utils.Logger import logger
|
17 |
+
from utils.configs import proxy_url_list, chatgpt_base_url_list, authorization_list
|
18 |
+
|
19 |
+
# Browser-like defaults sent with every upstream ChatGPT API request made by
# this module; per-token fingerprint headers (user-agent, sec-ch-*, ...) are
# merged on top of these before each call.
base_headers = {
    'accept': '*/*',
    'accept-encoding': 'gzip, deflate, br, zstd',
    'accept-language': 'en-US,en;q=0.9',
    'content-type': 'application/json',
    'oai-language': 'en-US',
    'priority': 'u=1, i',
    'sec-fetch-dest': 'empty',
    'sec-fetch-mode': 'cors',
    'sec-fetch-site': 'same-origin',
}
30 |
+
|
31 |
+
|
32 |
+
def verify_authorization(bearer_token):
    """Validate an admin bearer token against the configured allow-list.

    Raises:
        HTTPException: 401 when the token is missing or not whitelisted.
    """
    problem = None
    if not bearer_token:
        problem = "Authorization header is missing"
    elif bearer_token not in authorization_list:
        problem = "Invalid authorization"
    if problem is not None:
        raise HTTPException(status_code=401, detail=problem)
|
38 |
+
|
39 |
+
@app.get("/seedtoken")
async def get_seedtoken(request: Request, credentials: HTTPAuthorizationCredentials = Security(security_scheme)):
    """Return the token bound to a given seed, or the whole seed->token map
    when no ``seed`` query parameter is supplied. Requires admin auth."""
    verify_authorization(credentials.credentials)
    try:
        params = request.query_params
        seed = params.get("seed")

        if seed:
            if seed not in globals.seed_map:
                raise HTTPException(status_code=404, detail=f"Seed '{seed}' not found")
            return {
                "status": "success",
                "data": {
                    "seed": seed,
                    "token": globals.seed_map[seed]["token"]
                }
            }

        token_map = {
            seed: data["token"]
            for seed, data in globals.seed_map.items()
        }
        return {"status": "success", "data": token_map}

    except HTTPException:
        # Bug fix: the 404 raised above used to be caught by the generic
        # handler below and re-raised as a 500; propagate it unchanged.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")
|
65 |
+
|
66 |
+
|
67 |
+
@app.post("/seedtoken")
async def set_seedtoken(request: Request, credentials: HTTPAuthorizationCredentials = Security(security_scheme)):
    """Create or update the token stored for a seed, then persist the map to
    disk. Requires admin auth."""
    verify_authorization(credentials.credentials)
    data = await request.json()

    seed = data.get("seed")
    token = data.get("token")

    # Robustness fix: a missing seed/token used to be stored silently under a
    # null key; reject the request explicitly instead.
    if not seed or not token:
        raise HTTPException(status_code=400, detail="Missing required fields: seed and token")

    if seed not in globals.seed_map:
        globals.seed_map[seed] = {
            "token": token,
            "conversations": []
        }
    else:
        globals.seed_map[seed]["token"] = token

    with open(globals.SEED_MAP_FILE, "w", encoding="utf-8") as f:
        json.dump(globals.seed_map, f, indent=4)

    return {"status": "success", "message": "Token updated successfully"}
|
87 |
+
|
88 |
+
|
89 |
+
@app.delete("/seedtoken")
async def delete_seedtoken(request: Request, credentials: HTTPAuthorizationCredentials = Security(security_scheme)):
    """Delete one seed (or all of them when ``seed == "clear"``) and persist
    the map to disk. Requires admin auth."""
    verify_authorization(credentials.credentials)

    try:
        data = await request.json()
        seed = data.get("seed")

        if seed == "clear":
            globals.seed_map.clear()
            with open(globals.SEED_MAP_FILE, "w", encoding="utf-8") as f:
                json.dump(globals.seed_map, f, indent=4)
            return {"status": "success", "message": "All seeds deleted successfully"}

        if not seed:
            raise HTTPException(status_code=400, detail="Missing required field: seed")

        if seed not in globals.seed_map:
            raise HTTPException(status_code=404, detail=f"Seed '{seed}' not found")
        del globals.seed_map[seed]

        with open(globals.SEED_MAP_FILE, "w", encoding="utf-8") as f:
            json.dump(globals.seed_map, f, indent=4)

        return {
            "status": "success",
            "message": f"Seed '{seed}' deleted successfully"
        }

    except HTTPException:
        # Bug fix: the 400/404 raised above used to be caught by the generic
        # handler below and re-raised as a 500; propagate them unchanged.
        raise
    except json.JSONDecodeError:
        raise HTTPException(status_code=400, detail="Invalid JSON data")
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")
|
122 |
+
|
123 |
+
|
124 |
+
async def chatgpt_account_check(access_token):
    """Query upstream for the account's available models and subscription
    status.

    Returns a dict with ``models``, ``accounts_info`` and a summarized
    ``accountCheckInfo`` (deactivation flag, plan type, team ids), or an empty
    dict on any failure -- callers treat this as best-effort enrichment.
    """
    auth_info = {}
    # Resource-leak fix: the original constructed a throwaway Client here and
    # immediately overwrote it below without ever closing it.
    client = None
    try:
        host_url = random.choice(chatgpt_base_url_list) if chatgpt_base_url_list else "https://chatgpt.com"
        req_token = await get_real_req_token(access_token)
        access_token = await verify_token(req_token)
        fp = get_fp(req_token).copy()
        proxy_url = fp.pop("proxy_url", None)
        impersonate = fp.pop("impersonate", "safari15_3")

        headers = base_headers.copy()
        headers.update(fp)
        headers.update({"authorization": f"Bearer {access_token}"})

        client = Client(proxy=proxy_url, impersonate=impersonate)
        r = await client.get(f"{host_url}/backend-api/models?history_and_training_disabled=false", headers=headers,
                             timeout=10)
        if r.status_code != 200:
            raise HTTPException(status_code=r.status_code, detail=r.text)
        models = r.json()
        r = await client.get(f"{host_url}/backend-api/accounts/check/v4-2023-04-27", headers=headers, timeout=10)
        if r.status_code != 200:
            raise HTTPException(status_code=r.status_code, detail=r.text)
        accounts_info = r.json()

        auth_info.update({"models": models["models"]})
        auth_info.update({"accounts_info": accounts_info})

        account_ordering = accounts_info.get("account_ordering", [])
        is_deactivated = True
        plan_type = None
        team_ids = []
        for account in account_ordering:
            this_is_deactivated = accounts_info['accounts'].get(account, {}).get("account", {}).get("is_deactivated", False)
            this_plan_type = accounts_info['accounts'].get(account, {}).get("account", {}).get("plan_type", "free")

            if not this_is_deactivated:
                is_deactivated = False

            # Prefer an active team plan; otherwise keep the first plan seen.
            if "team" in this_plan_type and not this_is_deactivated:
                plan_type = this_plan_type
                team_ids.append(account)
            elif plan_type is None:
                plan_type = this_plan_type

        auth_info.update({"accountCheckInfo": {
            "is_deactivated": is_deactivated,
            "plan_type": plan_type,
            "team_ids": team_ids
        }})

        return auth_info
    except Exception as e:
        logger.error(f"chatgpt_account_check: {e}")
        return {}
    finally:
        if client is not None:
            await client.close()
|
182 |
+
|
183 |
+
|
184 |
+
async def chatgpt_refresh(refresh_token):
    """Exchange an OAuth refresh token for a fresh access token via Auth0.

    Returns the Auth0 token payload augmented with ``refresh_token`` and
    ``accessToken`` keys, or an empty dict when the exchange fails.
    """
    client = Client(proxy=random.choice(proxy_url_list) if proxy_url_list else None)
    try:
        payload = {
            "client_id": "pdlLIX2Y72MIl2rhLhTE9VV9bN905kBh",
            "grant_type": "refresh_token",
            "redirect_uri": "com.openai.chat://auth0.openai.com/ios/com.openai.chat/callback",
            "refresh_token": refresh_token
        }
        resp = await client.post("https://auth0.openai.com/oauth/token", json=payload, timeout=10)
        if resp.status_code != 200:
            raise HTTPException(status_code=resp.status_code, detail=resp.text)
        body = resp.json()
        auth_info = dict(body)
        auth_info["refresh_token"] = refresh_token
        auth_info["accessToken"] = body.get("access_token", "")
        return auth_info
    except Exception as e:
        logger.error(f"chatgpt_refresh: {e}")
        return {}
    finally:
        await client.close()
|
207 |
+
|
208 |
+
|
209 |
+
@app.post("/auth/refresh")
async def refresh(request: Request):
    """Refresh and/or validate ChatGPT credentials posted as form data.

    Accepts ``refresh_token`` and/or ``access_token`` (or ``accessToken``)
    fields. A refresh is only performed when the current access token expires
    within ~5 days; otherwise the existing token is re-validated via an
    account check. Responds 401 when neither token is usable.
    """
    auth_info = {}
    form_data = await request.form()

    # Echo all submitted fields back in the response payload.
    auth_info.update(form_data)

    access_token = auth_info.get("access_token", auth_info.get("accessToken", ""))
    refresh_token = auth_info.get("refresh_token", "")

    if not refresh_token and not access_token:
        raise HTTPException(status_code=401, detail="refresh_token or access_token is required")

    need_refresh = True
    if access_token:
        try:
            # Decode without signature verification just to read `exp`.
            access_token_info = jwt.decode(access_token, options={"verify_signature": False})
            exp = access_token_info.get("exp", 0)
            # Skip the refresh while the token is valid for more than 5 days.
            if exp > int(time.time()) + 60 * 60 * 24 * 5:
                need_refresh = False
        except Exception as e:
            logger.error(f"access_token: {e}")

    if refresh_token and need_refresh:
        chatgpt_refresh_info = await chatgpt_refresh(refresh_token)
        if chatgpt_refresh_info:
            auth_info.update(chatgpt_refresh_info)
            access_token = auth_info.get("accessToken", "")
            account_check_info = await chatgpt_account_check(access_token)
            if account_check_info:
                auth_info.update(account_check_info)
                auth_info.update({"accessToken": access_token})
                return Response(content=json.dumps(auth_info), media_type="application/json")
    elif access_token:
        account_check_info = await chatgpt_account_check(access_token)
        if account_check_info:
            auth_info.update(account_check_info)
            auth_info.update({"accessToken": access_token})
            return Response(content=json.dumps(auth_info), media_type="application/json")

    # Reached when the refresh or the account check failed.
    raise HTTPException(status_code=401, detail="Unauthorized")
|
250 |
+
|
251 |
+
|
gateway/v1.py
ADDED
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
|
3 |
+
from fastapi import Request
|
4 |
+
from fastapi.responses import Response
|
5 |
+
|
6 |
+
from app import app
|
7 |
+
from gateway.reverseProxy import chatgpt_reverse_proxy
|
8 |
+
from utils.kv_utils import set_value_for_key
|
9 |
+
|
10 |
+
|
11 |
+
@app.post("/v1/initialize")
async def initialize(request: Request):
    """Proxy /v1/initialize upstream, then mask the caller's network/geo
    fields in the returned JSON."""
    # Fix: the original used an f-string with no placeholders here.
    initialize_response = await chatgpt_reverse_proxy(request, "v1/initialize")
    # NOTE(review): assumes the proxy returns a plain Response with a JSON
    # body (not a StreamingResponse) for this endpoint -- confirm upstream.
    initialize_str = initialize_response.body.decode('utf-8')
    initialize_json = json.loads(initialize_str)
    # Overwrite detected network/location info with neutral values.
    set_value_for_key(initialize_json, "ip", "8.8.8.8")
    set_value_for_key(initialize_json, "country", "US")
    return Response(content=json.dumps(initialize_json, indent=4), media_type="application/json")
|
19 |
+
|
20 |
+
|
21 |
+
@app.post("/v1/rgstr")
async def rgstr():
    """Short-circuit the statsig registration call with a canned success."""
    body = json.dumps({"success": True}, indent=4)
    return Response(status_code=202, content=body, media_type="application/json")
|
24 |
+
|
25 |
+
|
26 |
+
@app.get("/ces/v1/projects/oai/settings")
async def ces_v1_projects_oai_settings():
    """Serve static Segment.io settings so the CES analytics loader is happy."""
    settings = {
        "integrations": {
            "Segment.io": {
                "apiHost": "chatgpt.com/ces/v1",
                "apiKey": "oai"
            }
        }
    }
    return Response(status_code=200, content=json.dumps(settings, indent=4), media_type="application/json")
|
29 |
+
|
30 |
+
|
31 |
+
@app.post("/ces/v1/{path:path}")
async def ces_v1():
    """Swallow all CES analytics POSTs and acknowledge them."""
    payload = {"success": True}
    return Response(status_code=202, content=json.dumps(payload, indent=4), media_type="application/json")
|
requirements.txt
ADDED
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
fastapi==0.115.3
|
2 |
+
python-multipart==0.0.13
|
3 |
+
curl_cffi==0.7.3
|
4 |
+
uvicorn
|
5 |
+
tiktoken
|
6 |
+
python-dotenv
|
7 |
+
websockets
|
8 |
+
pillow
|
9 |
+
pybase64
|
10 |
+
jinja2
|
11 |
+
APScheduler
|
12 |
+
ua-generator
|
13 |
+
pyjwt
|
templates/chatgpt.html
ADDED
@@ -0,0 +1,385 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<!DOCTYPE html>
|
2 |
+
<html data-build="prod-11173c28d7974347784902fe1d57aae3a20e661b" dir="ltr" class="">
|
3 |
+
<head>
|
4 |
+
<meta charSet="UTF-8"/>
|
5 |
+
<meta name="viewport" content="width=device-width, initial-scale=1"/>
|
6 |
+
<link rel="preconnect" href=""/>
|
7 |
+
<meta name="robots" content="index, follow"/>
|
8 |
+
<meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>
|
9 |
+
<meta name="apple-itunes-app" content="app-id=6448311069"/>
|
10 |
+
<link rel="apple-touch-icon" sizes="180x180" href="/assets/apple-touch-icon-mz9nytnj.webp"/>
|
11 |
+
<link rel="icon" type="image/png" sizes="32x32" href="/assets/favicon-32x32-p4ktpm1x.webp" media="(prefers-color-scheme: light)"/>
|
12 |
+
<link rel="icon" type="image/png" sizes="32x32" href="/assets/favicon-dark-32x32-gt5kfzyp.webp" media="(prefers-color-scheme: dark)"/>
|
13 |
+
<link rel="icon" type="image/svg+xml" sizes="32x32" href="/assets/favicon-o20kmmos.svg"/>
|
14 |
+
<title>ChatGPT</title>
|
15 |
+
<meta name="description" content="ChatGPT helps you get answers, find inspiration and be more productive. It is free to use and easy to try. Just ask and ChatGPT can help with writing, learning, brainstorming and more."/>
|
16 |
+
<meta name="keyword" content="ai chat,ai,chap gpt,chat gbt,chat gpt 3,chat gpt login,chat gpt website,chat gpt,chat gtp,chat openai,chat,chatai,chatbot gpt,chatg,chatgpt login,chatgpt,gpt chat,open ai,openai chat,openai chatgpt,openai"/>
|
17 |
+
<meta property="og:description" content="A conversational AI system that listens, learns, and challenges"/>
|
18 |
+
<meta property="og:title" content="ChatGPT"/>
|
19 |
+
<meta property="og:image" content="/assets/chatgpt-share-og-u7j5uyao.webp"/>
|
20 |
+
<meta property="og:url" content="https://chatgpt.com"/>
|
21 |
+
<link rel="modulepreload" href="/assets/manifest-7d43a138.js"/>
|
22 |
+
<link rel="modulepreload" href="/assets/mxkyxjre6muko6z4.js"/>
|
23 |
+
<link rel="modulepreload" href="/assets/nqo5y2f0dorhrqsr.js"/>
|
24 |
+
<link rel="modulepreload" href="/assets/fpwmsu1awpj0g2ko.js"/>
|
25 |
+
<link rel="modulepreload" href="/assets/dh0yl0m9q337gmci.js"/>
|
26 |
+
<link rel="modulepreload" href="/assets/ihbvqeoqhbkuefft.js"/>
|
27 |
+
<link rel="modulepreload" href="/assets/ty18ymk2wf2zvd35.js"/>
|
28 |
+
<link rel="modulepreload" href="/assets/ktiwgucld5a8s55m.js"/>
|
29 |
+
<link rel="modulepreload" href="/assets/dgcxf4c1lo6y3h3a.js"/>
|
30 |
+
<link rel="modulepreload" href="/assets/nb34aa8izknzna97.js"/>
|
31 |
+
<link rel="modulepreload" href="/assets/l697z2ouob9b6hw7.js"/>
|
32 |
+
<link rel="modulepreload" href="/assets/odhzv2q19wc7ynyt.js"/>
|
33 |
+
<link rel="modulepreload" href="/assets/k56enwh74zn4hbwt.js"/>
|
34 |
+
<link rel="modulepreload" href="/assets/mfdhaorj0sghs4gl.js"/>
|
35 |
+
<link rel="modulepreload" href="/assets/fy8orb1z72ox92eg.js"/>
|
36 |
+
<link rel="modulepreload" href="/assets/cnv95wiq9am77uyo.js"/>
|
37 |
+
<link rel="modulepreload" href="/assets/gtbc1g1q4ztw05rv.js"/>
|
38 |
+
<link rel="modulepreload" href="/assets/dvl2tfqalthh42cv.js"/>
|
39 |
+
<link rel="modulepreload" href="/assets/cb0x1wlgm93n2hpu.js"/>
|
40 |
+
<link rel="modulepreload" href="/assets/buun9i8g5c97ea0e.js"/>
|
41 |
+
<link rel="modulepreload" href="/assets/ezyyguzloavludkz.js"/>
|
42 |
+
<link rel="modulepreload" href="/assets/usnn0symwgs6mmsd.js"/>
|
43 |
+
<link rel="modulepreload" href="/assets/jjr9on9cxlrbskjq.js"/>
|
44 |
+
<link rel="stylesheet" href="/assets/root-e6p3mfos.css"/>
|
45 |
+
<link rel="stylesheet" href="/assets/conversation-small-kq10986g.css"/>
|
46 |
+
</head>
|
47 |
+
<body class="">
|
48 |
+
<script>
|
49 |
+
!function() {
|
50 |
+
try {
|
51 |
+
var d = document.documentElement
|
52 |
+
, c = d.classList;
|
53 |
+
c.remove('light', 'dark');
|
54 |
+
var e = localStorage.getItem('theme');
|
55 |
+
if ('system' === e || (!e && true)) {
|
56 |
+
var t = '(prefers-color-scheme: dark)'
|
57 |
+
, m = window.matchMedia(t);
|
58 |
+
if (m.media !== t || m.matches) {
|
59 |
+
d.style.colorScheme = 'dark';
|
60 |
+
c.add('dark')
|
61 |
+
} else {
|
62 |
+
d.style.colorScheme = 'light';
|
63 |
+
c.add('light')
|
64 |
+
}
|
65 |
+
} else if (e) {
|
66 |
+
c.add(e || '')
|
67 |
+
}
|
68 |
+
if (e === 'light' || e === 'dark')
|
69 |
+
d.style.colorScheme = e
|
70 |
+
} catch (e) {}
|
71 |
+
}()
|
72 |
+
</script>
|
73 |
+
<div class="relative flex h-full w-full overflow-hidden transition-colors z-0">
|
74 |
+
<div class="z-[21] flex-shrink-0 overflow-x-hidden bg-token-sidebar-surface-primary max-md:!w-0" style="width:260px">
|
75 |
+
<div class="h-full w-[260px]">
|
76 |
+
<div class="flex h-full min-h-0 flex-col">
|
77 |
+
<div class="draggable relative h-full w-full flex-1 items-start border-white/20">
|
78 |
+
<h2 style="position:absolute;border:0;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0, 0, 0, 0);white-space:nowrap;word-wrap:normal">Chat history</h2>
|
79 |
+
<nav class="flex h-full w-full flex-col px-3" aria-label="Chat history">
|
80 |
+
<div class="flex justify-between flex h-[60px] items-center md:h-header-height">
|
81 |
+
<span class="hidden"></span>
|
82 |
+
<span class="flex" data-state="closed">
|
83 |
+
<button aria-label="Close sidebar" data-testid="close-sidebar-button" class="h-10 rounded-lg px-2 text-token-text-secondary focus-visible:outline-0 disabled:text-token-text-quaternary focus-visible:bg-token-sidebar-surface-secondary enabled:hover:bg-token-sidebar-surface-secondary no-draggable">
|
84 |
+
<svg width="24" height="24" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg" class="icon-xl-heavy max-md:hidden">
|
85 |
+
<path fill-rule="evenodd" clip-rule="evenodd" d="M8.85719 3H15.1428C16.2266 2.99999 17.1007 2.99998 17.8086 3.05782C18.5375 3.11737 19.1777 3.24318 19.77 3.54497C20.7108 4.02433 21.4757 4.78924 21.955 5.73005C22.2568 6.32234 22.3826 6.96253 22.4422 7.69138C22.5 8.39925 22.5 9.27339 22.5 10.3572V13.6428C22.5 14.7266 22.5 15.6008 22.4422 16.3086C22.3826 17.0375 22.2568 17.6777 21.955 18.27C21.4757 19.2108 20.7108 19.9757 19.77 20.455C19.1777 20.7568 18.5375 20.8826 17.8086 20.9422C17.1008 21 16.2266 21 15.1428 21H8.85717C7.77339 21 6.89925 21 6.19138 20.9422C5.46253 20.8826 4.82234 20.7568 4.23005 20.455C3.28924 19.9757 2.52433 19.2108 2.04497 18.27C1.74318 17.6777 1.61737 17.0375 1.55782 16.3086C1.49998 15.6007 1.49999 14.7266 1.5 13.6428V10.3572C1.49999 9.27341 1.49998 8.39926 1.55782 7.69138C1.61737 6.96253 1.74318 6.32234 2.04497 5.73005C2.52433 4.78924 3.28924 4.02433 4.23005 3.54497C4.82234 3.24318 5.46253 3.11737 6.19138 3.05782C6.89926 2.99998 7.77341 2.99999 8.85719 3ZM6.35424 5.05118C5.74907 5.10062 5.40138 5.19279 5.13803 5.32698C4.57354 5.6146 4.1146 6.07354 3.82698 6.63803C3.69279 6.90138 3.60062 7.24907 3.55118 7.85424C3.50078 8.47108 3.5 9.26339 3.5 10.4V13.6C3.5 14.7366 3.50078 15.5289 3.55118 16.1458C3.60062 16.7509 3.69279 17.0986 3.82698 17.362C4.1146 17.9265 4.57354 18.3854 5.13803 18.673C5.40138 18.8072 5.74907 18.8994 6.35424 18.9488C6.97108 18.9992 7.76339 19 8.9 19H9.5V5H8.9C7.76339 5 6.97108 5.00078 6.35424 5.05118ZM11.5 5V19H15.1C16.2366 19 17.0289 18.9992 17.6458 18.9488C18.2509 18.8994 18.5986 18.8072 18.862 18.673C19.4265 18.3854 19.8854 17.9265 20.173 17.362C20.3072 17.0986 20.3994 16.7509 20.4488 16.1458C20.4992 15.5289 20.5 14.7366 20.5 13.6V10.4C20.5 9.26339 20.4992 8.47108 20.4488 7.85424C20.3994 7.24907 20.3072 6.90138 20.173 6.63803C19.8854 6.07354 19.4265 5.6146 18.862 5.32698C18.5986 5.19279 18.2509 5.10062 17.6458 5.05118C17.0289 5.00078 16.2366 5 15.1 5H11.5ZM5 8.5C5 7.94772 5.44772 7.5 6 7.5H7C7.55229 7.5 8 7.94772 8 8.5C8 
9.05229 7.55229 9.5 7 9.5H6C5.44772 9.5 5 9.05229 5 8.5ZM5 12C5 11.4477 5.44772 11 6 11H7C7.55229 11 8 11.4477 8 12C8 12.5523 7.55229 13 7 13H6C5.44772 13 5 12.5523 5 12Z" fill="currentColor"></path>
|
86 |
+
</svg>
|
87 |
+
<svg width="24" height="24" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg" class="icon-xl-heavy md:hidden">
|
88 |
+
<path fill-rule="evenodd" clip-rule="evenodd" d="M3 8C3 7.44772 3.44772 7 4 7H20C20.5523 7 21 7.44772 21 8C21 8.55228 20.5523 9 20 9H4C3.44772 9 3 8.55228 3 8ZM3 16C3 15.4477 3.44772 15 4 15H14C14.5523 15 15 15.4477 15 16C15 16.5523 14.5523 17 14 17H4C3.44772 17 3 16.5523 3 16Z" fill="currentColor"></path>
|
89 |
+
</svg>
|
90 |
+
</button>
|
91 |
+
</span>
|
92 |
+
<div class="flex">
|
93 |
+
<span class="hidden"></span>
|
94 |
+
<span class="flex" data-state="closed">
|
95 |
+
<button aria-label="New chat" data-testid="create-new-chat-button" class="h-10 rounded-lg px-2 text-token-text-secondary focus-visible:outline-0 disabled:text-token-text-quaternary focus-visible:bg-token-sidebar-surface-secondary enabled:hover:bg-token-sidebar-surface-secondary">
|
96 |
+
<svg width="24" height="24" viewBox="0 0 24 24" fill="currentColor" xmlns="http://www.w3.org/2000/svg" class="icon-xl-heavy">
|
97 |
+
<path d="M15.6729 3.91287C16.8918 2.69392 18.8682 2.69392 20.0871 3.91287C21.3061 5.13182 21.3061 7.10813 20.0871 8.32708L14.1499 14.2643C13.3849 15.0293 12.3925 15.5255 11.3215 15.6785L9.14142 15.9899C8.82983 16.0344 8.51546 15.9297 8.29289 15.7071C8.07033 15.4845 7.96554 15.1701 8.01005 14.8586L8.32149 12.6785C8.47449 11.6075 8.97072 10.615 9.7357 9.85006L15.6729 3.91287ZM18.6729 5.32708C18.235 4.88918 17.525 4.88918 17.0871 5.32708L11.1499 11.2643C10.6909 11.7233 10.3932 12.3187 10.3014 12.9613L10.1785 13.8215L11.0386 13.6986C11.6812 13.6068 12.2767 13.3091 12.7357 12.8501L18.6729 6.91287C19.1108 6.47497 19.1108 5.76499 18.6729 5.32708ZM11 3.99929C11.0004 4.55157 10.5531 4.99963 10.0008 5.00007C9.00227 5.00084 8.29769 5.00827 7.74651 5.06064C7.20685 5.11191 6.88488 5.20117 6.63803 5.32695C6.07354 5.61457 5.6146 6.07351 5.32698 6.63799C5.19279 6.90135 5.10062 7.24904 5.05118 7.8542C5.00078 8.47105 5 9.26336 5 10.4V13.6C5 14.7366 5.00078 15.5289 5.05118 16.1457C5.10062 16.7509 5.19279 17.0986 5.32698 17.3619C5.6146 17.9264 6.07354 18.3854 6.63803 18.673C6.90138 18.8072 7.24907 18.8993 7.85424 18.9488C8.47108 18.9992 9.26339 19 10.4 19H13.6C14.7366 19 15.5289 18.9992 16.1458 18.9488C16.7509 18.8993 17.0986 18.8072 17.362 18.673C17.9265 18.3854 18.3854 17.9264 18.673 17.3619C18.7988 17.1151 18.8881 16.7931 18.9393 16.2535C18.9917 15.7023 18.9991 14.9977 18.9999 13.9992C19.0003 13.4469 19.4484 12.9995 20.0007 13C20.553 13.0004 21.0003 13.4485 20.9999 14.0007C20.9991 14.9789 20.9932 15.7808 20.9304 16.4426C20.8664 17.116 20.7385 17.7136 20.455 18.2699C19.9757 19.2107 19.2108 19.9756 18.27 20.455C17.6777 20.7568 17.0375 20.8826 16.3086 20.9421C15.6008 21 14.7266 21 13.6428 21H10.3572C9.27339 21 8.39925 21 7.69138 20.9421C6.96253 20.8826 6.32234 20.7568 5.73005 20.455C4.78924 19.9756 4.02433 19.2107 3.54497 18.2699C3.24318 17.6776 3.11737 17.0374 3.05782 16.3086C2.99998 15.6007 2.99999 14.7266 3 13.6428V10.3572C2.99999 9.27337 2.99998 8.39922 3.05782 7.69134C3.11737 
6.96249 3.24318 6.3223 3.54497 5.73001C4.02433 4.7892 4.78924 4.0243 5.73005 3.54493C6.28633 3.26149 6.88399 3.13358 7.55735 3.06961C8.21919 3.00673 9.02103 3.00083 9.99922 3.00007C10.5515 2.99964 10.9996 3.447 11 3.99929Z" fill="currentColor"></path>
|
98 |
+
</svg>
|
99 |
+
</button>
|
100 |
+
</span>
|
101 |
+
</div>
|
102 |
+
</div>
|
103 |
+
<div class="flex-col flex-1 transition-opacity duration-500 relative -mr-2 pr-2 overflow-y-auto">
|
104 |
+
<div class="group/sidebar">
|
105 |
+
<div class="flex flex-col gap-2 text-token-text-primary text-sm h-full justify-center items-center empty:hidden mt-5 pb-2"></div>
|
106 |
+
</div>
|
107 |
+
</div>
|
108 |
+
</nav>
|
109 |
+
</div>
|
110 |
+
</div>
|
111 |
+
</div>
|
112 |
+
</div>
|
113 |
+
<div class="relative flex h-full max-w-full flex-1 flex-col overflow-hidden">
|
114 |
+
<main class="relative h-full w-full flex-1 overflow-auto transition-width">
|
115 |
+
<div role="presentation" class="composer-parent flex h-full flex-col focus-visible:outline-0">
|
116 |
+
<div class="flex-1 overflow-hidden @container/thread">
|
117 |
+
<div class="relative h-full">
|
118 |
+
<div class="absolute left-0 right-0">
|
119 |
+
<div class="draggable no-draggable-children sticky top-0 p-3 mb-1.5 flex items-center justify-between z-10 h-header-height font-semibold bg-token-main-surface-primary max-md:hidden">
|
120 |
+
<div class="absolute start-1/2 ltr:-translate-x-1/2 rtl:translate-x-1/2"></div>
|
121 |
+
<div class="flex items-center gap-0 overflow-hidden"></div>
|
122 |
+
<div class="gap-2 flex items-center pr-1 leading-[0]"></div>
|
123 |
+
</div>
|
124 |
+
</div>
|
125 |
+
<div class="flex h-full flex-col items-center justify-center text-token-text-primary">
|
126 |
+
<div class="h-full w-full lg:py-[18px]">
|
127 |
+
<div class="m-auto text-base px-3 md:px-4 w-full md:px-5 lg:px-4 xl:px-5 h-full">
|
128 |
+
<div class="mx-auto flex h-full w-full flex-col text-base lg:justify-center md:max-w-3xl lg:max-w-[40rem] xl:max-w-[48rem]">
|
129 |
+
<div class="mb-7 hidden text-center lg:block">
|
130 |
+
<div class="relative inline-flex justify-center text-center text-2xl font-semibold leading-9">
|
131 |
+
<h1>What can I help with?</h1>
|
132 |
+
</div>
|
133 |
+
</div>
|
134 |
+
<div class="justify-center mt-[var(--screen-optical-compact-offset-amount)] flex h-full flex-shrink flex-col items-center overflow-hidden text-token-text-primary lg:hidden" style="opacity:0;will-change:opacity">
|
135 |
+
<div class="relative inline-flex justify-center text-center text-2xl font-semibold leading-9">
|
136 |
+
<h1>What can I help with?</h1>
|
137 |
+
</div>
|
138 |
+
<div class="h-[116px]" style="opacity:0;will-change:opacity"></div>
|
139 |
+
</div>
|
140 |
+
<div class="lg:absolute lg:bottom-8 lg:left-0 lg:w-full">
|
141 |
+
<div class="mx-auto flex h-full w-full flex-col text-base lg:justify-center md:max-w-3xl lg:max-w-[40rem] xl:max-w-[48rem]">
|
142 |
+
<div class="block z-20"></div>
|
143 |
+
</div>
|
144 |
+
</div>
|
145 |
+
<div class="w-full">
|
146 |
+
<div class="flex justify-center"></div>
|
147 |
+
<form class="w-full" type="button" aria-haspopup="dialog" aria-expanded="false" aria-controls="radix-:Ruuijqaaklj5:" data-state="closed">
|
148 |
+
<div class="relative flex h-full max-w-full flex-1 flex-col">
|
149 |
+
<div class="group relative flex w-full items-center">
|
150 |
+
<div id="composer-background" class="flex w-full cursor-text flex-col rounded-3xl px-2.5 py-1 transition-colors contain-inline-size bg-[#f4f4f4] dark:bg-token-main-surface-secondary">
|
151 |
+
<div class="flex min-h-[44px] items-start pl-2">
|
152 |
+
<div class="min-w-0 max-w-full flex-1">
|
153 |
+
<div class="_prosemirror-parent_15ceg_1 text-token-text-primary max-h-[25dvh] max-h-52 overflow-auto default-browser">
|
154 |
+
<textarea class="block h-10 w-full resize-none border-0 bg-transparent px-0 py-2 text-token-text-primary placeholder:text-token-text-secondary" autofocus="" placeholder="Message ChatGPT"></textarea>
|
155 |
+
<script nonce="2c1aeec6-392b-43e2-8822-d0f90e909f00">
|
156 |
+
window.__oai_logHTML ? window.__oai_logHTML() : window.__oai_SSR_HTML = window.__oai_SSR_HTML || Date.now();
|
157 |
+
requestAnimationFrame((function() {
|
158 |
+
window.__oai_logTTI ? window.__oai_logTTI() : window.__oai_SSR_TTI = window.__oai_SSR_TTI || Date.now()
|
159 |
+
}
|
160 |
+
))
|
161 |
+
</script>
|
162 |
+
</div>
|
163 |
+
</div>
|
164 |
+
<div class="w-[32px] pt-1"></div>
|
165 |
+
</div>
|
166 |
+
<div class="flex h-[44px] items-center justify-between">
|
167 |
+
<div class="flex gap-x-1">
|
168 |
+
<div class="relative">
|
169 |
+
<div class="relative">
|
170 |
+
<span class="hidden"></span>
|
171 |
+
<span class="flex" data-state="closed">
|
172 |
+
<div class="flex">
|
173 |
+
<button disabled="" aria-label="Attach files is unavailable" class="flex items-center justify-center h-8 w-8 rounded-lg rounded-bl-xl text-token-text-primary dark:text-white focus-visible:outline-black dark:focus-visible:outline-white opacity-30">
|
174 |
+
<svg width="24" height="24" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
|
175 |
+
<path fill-rule="evenodd" clip-rule="evenodd" d="M9 7C9 4.23858 11.2386 2 14 2C16.7614 2 19 4.23858 19 7V15C19 18.866 15.866 22 12 22C8.13401 22 5 18.866 5 15V9C5 8.44772 5.44772 8 6 8C6.55228 8 7 8.44772 7 9V15C7 17.7614 9.23858 20 12 20C14.7614 20 17 17.7614 17 15V7C17 5.34315 15.6569 4 14 4C12.3431 4 11 5.34315 11 7V15C11 15.5523 11.4477 16 12 16C12.5523 16 13 15.5523 13 15V9C13 8.44772 13.4477 8 14 8C14.5523 8 15 8.44772 15 9V15C15 16.6569 13.6569 18 12 18C10.3431 18 9 16.6569 9 15V7Z" fill="currentColor"></path>
|
176 |
+
</svg>
|
177 |
+
</button>
|
178 |
+
<input disabled="" multiple="" type="file" style="display:none" tabindex="-1" class="hidden"/>
|
179 |
+
</div>
|
180 |
+
</span>
|
181 |
+
</div>
|
182 |
+
</div>
|
183 |
+
</div>
|
184 |
+
<div class="min-w-8">
|
185 |
+
<span class="hidden"></span>
|
186 |
+
<span class="" data-state="closed">
|
187 |
+
<button disabled="" aria-label="Send prompt" data-testid="send-button" class="flex h-8 w-8 items-center justify-center rounded-full transition-colors hover:opacity-70 focus-visible:outline-none focus-visible:outline-black disabled:text-[#f4f4f4] disabled:hover:opacity-100 dark:focus-visible:outline-white disabled:dark:bg-token-text-quaternary dark:disabled:text-token-main-surface-secondary bg-black text-white dark:bg-white dark:text-black disabled:bg-[#D7D7D7]">
|
188 |
+
<svg width="32" height="32" viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg" class="icon-2xl">
|
189 |
+
<path fill-rule="evenodd" clip-rule="evenodd" d="M15.1918 8.90615C15.6381 8.45983 16.3618 8.45983 16.8081 8.90615L21.9509 14.049C22.3972 14.4953 22.3972 15.2189 21.9509 15.6652C21.5046 16.1116 20.781 16.1116 20.3347 15.6652L17.1428 12.4734V22.2857C17.1428 22.9169 16.6311 23.4286 15.9999 23.4286C15.3688 23.4286 14.8571 22.9169 14.8571 22.2857V12.4734L11.6652 15.6652C11.2189 16.1116 10.4953 16.1116 10.049 15.6652C9.60265 15.2189 9.60265 14.4953 10.049 14.049L15.1918 8.90615Z" fill="currentColor"></path>
|
190 |
+
</svg>
|
191 |
+
</button>
|
192 |
+
</span>
|
193 |
+
</div>
|
194 |
+
</div>
|
195 |
+
</div>
|
196 |
+
</div>
|
197 |
+
</div>
|
198 |
+
</form>
|
199 |
+
</div>
|
200 |
+
<div class="hidden lg:block h-[70px]" style="opacity:0;will-change:opacity"></div>
|
201 |
+
<div class="w-full px-2 py-2 text-center text-xs text-token-text-secondary empty:hidden md:px-[60px] lg:absolute lg:bottom-0 lg:left-0">
|
202 |
+
<div class="min-h-4">
|
203 |
+
<div>ChatGPT can make mistakes. Check important info.</div>
|
204 |
+
</div>
|
205 |
+
</div>
|
206 |
+
</div>
|
207 |
+
</div>
|
208 |
+
</div>
|
209 |
+
</div>
|
210 |
+
</div>
|
211 |
+
</div>
|
212 |
+
<div class="md:pt-0 dark:border-white/20 md:border-transparent md:dark:border-transparent w-full">
|
213 |
+
<div>
|
214 |
+
<div class="relative w-full px-2 py-2 text-center text-xs text-token-text-secondary empty:hidden md:px-[60px]"></div>
|
215 |
+
</div>
|
216 |
+
</div>
|
217 |
+
</div>
|
218 |
+
<div class="group absolute bottom-2 end-2 z-20 flex flex-col gap-1 md:flex lg:bottom-3 lg:end-3">
|
219 |
+
<button class="flex h-6 w-6 items-center justify-center rounded-full border border-token-border-light text-xs text-token-text-secondary" type="button" id="radix-:R15qaaklj5:" aria-haspopup="menu" aria-expanded="false" data-state="closed" data-testid="undefined-button">?</button>
|
220 |
+
</div>
|
221 |
+
</main>
|
222 |
+
</div>
|
223 |
+
</div>
|
224 |
+
<div aria-live="assertive" aria-atomic="true" class="sr-only"></div>
|
225 |
+
<div aria-live="polite" aria-atomic="true" class="sr-only"></div>
|
226 |
+
<audio class="fixed bottom-0 left-0 hidden h-0 w-0" autoPlay="" crossorigin="anonymous"></audio>
|
227 |
+
<script nonce="2c1aeec6-392b-43e2-8822-d0f90e909f00">
|
228 |
+
window.__remixContext = {{ remix_context|tojson }};
|
229 |
+
__remixContext.p = function(v, e, p, x) {
|
230 |
+
if (typeof e !== 'undefined') {
|
231 |
+
x = new Error("Unexpected Server Error");
|
232 |
+
x.stack = undefined;
|
233 |
+
p = Promise.reject(x);
|
234 |
+
} else {
|
235 |
+
p = Promise.resolve(v);
|
236 |
+
}
|
237 |
+
return p;
|
238 |
+
}
|
239 |
+
;
|
240 |
+
__remixContext.n = function(i, k) {
|
241 |
+
__remixContext.t = __remixContext.t || {};
|
242 |
+
__remixContext.t[i] = __remixContext.t[i] || {};
|
243 |
+
let p = new Promise( (r, e) => {
|
244 |
+
__remixContext.t[i][k] = {
|
245 |
+
r: (v) => {
|
246 |
+
r(v);
|
247 |
+
}
|
248 |
+
,
|
249 |
+
e: (v) => {
|
250 |
+
e(v);
|
251 |
+
}
|
252 |
+
};
|
253 |
+
}
|
254 |
+
);
|
255 |
+
setTimeout( () => {
|
256 |
+
if (typeof p._error !== "undefined" || typeof p._data !== "undefined") {
|
257 |
+
return;
|
258 |
+
}
|
259 |
+
__remixContext.t[i][k].e(new Error("Server timeout."))
|
260 |
+
}
|
261 |
+
, 5000);
|
262 |
+
return p;
|
263 |
+
}
|
264 |
+
;
|
265 |
+
__remixContext.r = function(i, k, v, e, p, x) {
|
266 |
+
p = __remixContext.t[i][k];
|
267 |
+
if (typeof e !== 'undefined') {
|
268 |
+
x = new Error("Unexpected Server Error");
|
269 |
+
x.stack = undefined;
|
270 |
+
p.e(x);
|
271 |
+
} else {
|
272 |
+
p.r(v);
|
273 |
+
}
|
274 |
+
}
|
275 |
+
;
|
276 |
+
Object.assign(__remixContext.state.loaderData["root"], {
|
277 |
+
"rq:[\"account-status\"]": __remixContext.n("root", "rq:[\"account-status\"]")
|
278 |
+
});
|
279 |
+
Object.assign(__remixContext.state.loaderData["routes/_conversation"], {});
|
280 |
+
__remixContext.a = 1;
|
281 |
+
</script>
|
282 |
+
<script nonce="2c1aeec6-392b-43e2-8822-d0f90e909f00" type="module" async="">
|
283 |
+
import "/assets/manifest-7d43a138.js";
|
284 |
+
import*as route0 from "/assets/ihbvqeoqhbkuefft.js";
|
285 |
+
import*as route1 from "/assets/usnn0symwgs6mmsd.js";
|
286 |
+
import*as route2 from "/assets/jjr9on9cxlrbskjq.js";
|
287 |
+
|
288 |
+
window.__remixRouteModules = {
|
289 |
+
"root": route0,
|
290 |
+
"routes/_conversation": route1,
|
291 |
+
"routes/_conversation._index": route2
|
292 |
+
};
|
293 |
+
|
294 |
+
import("/assets/mxkyxjre6muko6z4.js");
|
295 |
+
</script>
|
296 |
+
<!--$?-->
|
297 |
+
<template id="B:0"></template>
|
298 |
+
<!--/$-->
|
299 |
+
<div hidden id="S:0">
|
300 |
+
<script nonce="2c1aeec6-392b-43e2-8822-d0f90e909f00" async="">
|
301 |
+
__remixContext.r("root", "rq:[\"account-status\"]", {
|
302 |
+
"__type": "AccountState",
|
303 |
+
"accountItems": [{
|
304 |
+
"data": {
|
305 |
+
"id": "chatgpt",
|
306 |
+
"residencyRegion": "no_constraint",
|
307 |
+
"accountUserId": "user-chatgpt__chatgpt",
|
308 |
+
"name": null,
|
309 |
+
"profilePictureId": null,
|
310 |
+
"profilePictureUrl": null,
|
311 |
+
"structure": "personal",
|
312 |
+
"role": "account-owner",
|
313 |
+
"organizationId": null,
|
314 |
+
"promoData": {},
|
315 |
+
"deactivated": false,
|
316 |
+
"subscriptionStatus": {
|
317 |
+
"billingPeriod": "monthly",
|
318 |
+
"hasPaidSubscription": true,
|
319 |
+
"isActiveSubscriptionGratis": false,
|
320 |
+
"subscriptionPlan": "chatgptpro",
|
321 |
+
"planType": "pro",
|
322 |
+
"subscriptionExpiresAt": 2524579200,
|
323 |
+
"scheduledPlanChange": {
|
324 |
+
"changesAt": 2524579200,
|
325 |
+
"planType": "pro"
|
326 |
+
},
|
327 |
+
"wasPaidCustomer": true,
|
328 |
+
"hasCustomerObject": true,
|
329 |
+
"lastActiveSubscription": {
|
330 |
+
"subscription_id": "5663ce0c-4d75-4b22-812a-52b65042c886",
|
331 |
+
"purchase_origin_platform": "chatgpt_web",
|
332 |
+
"will_renew": true
|
333 |
+
},
|
334 |
+
"isResellerHosted": false
|
335 |
+
},
|
336 |
+
"features": ["beta_features", "bizmo_settings", "breeze_available", "browsing_available", "canvas", "canvas_code_execution", "canvas_opt_in", "chart_serialization", "chat_preferences_available", "chatgpt_ios_attest", "chatgpt_team_plan", "code_interpreter_available", "d3_controls", "d3_editor", "d3_editor_gpts", "dalle_3", "gizmo_canvas_toggle", "gizmo_reviews", "gizmo_support_emails", "graphite", "mfa", "model_ab_use_v2", "model_switcher", "new_plugin_oauth_endpoint", "no_auth_training_enabled_by_default", "o1_launch", "o1_pro_launch", "paragen_mainline_alternative", "plugins_available", "privacy_policy_nov_2023", "search_tool", "sentinel_enabled_for_subscription", "share_multimodal_links", "shareable_links", "starter_prompts", "sunshine_available", "user_settings_announcements", "voice_advanced_ga"],
|
337 |
+
"canAccessWithCurrentSession": true,
|
338 |
+
"ssoConnectionName": null
|
339 |
+
}
|
340 |
+
}],
|
341 |
+
"currentAccountId": "chatgpt"
|
342 |
+
})
|
343 |
+
</script>
|
344 |
+
</div>
|
345 |
+
<script nonce="2c1aeec6-392b-43e2-8822-d0f90e909f00">
|
346 |
+
$RC = function(b, c, e) {
|
347 |
+
c = document.getElementById(c);
|
348 |
+
c.parentNode.removeChild(c);
|
349 |
+
var a = document.getElementById(b);
|
350 |
+
if (a) {
|
351 |
+
b = a.previousSibling;
|
352 |
+
if (e)
|
353 |
+
b.data = "$!",
|
354 |
+
a.setAttribute("data-dgst", e);
|
355 |
+
else {
|
356 |
+
e = b.parentNode;
|
357 |
+
a = b.nextSibling;
|
358 |
+
var f = 0;
|
359 |
+
do {
|
360 |
+
if (a && 8 === a.nodeType) {
|
361 |
+
var d = a.data;
|
362 |
+
if ("/$" === d)
|
363 |
+
if (0 === f)
|
364 |
+
break;
|
365 |
+
else
|
366 |
+
f--;
|
367 |
+
else
|
368 |
+
"$" !== d && "$?" !== d && "$!" !== d || f++
|
369 |
+
}
|
370 |
+
d = a.nextSibling;
|
371 |
+
e.removeChild(a);
|
372 |
+
a = d
|
373 |
+
} while (a);
|
374 |
+
for (; c.firstChild; )
|
375 |
+
e.insertBefore(c.firstChild, a);
|
376 |
+
b.data = "$"
|
377 |
+
}
|
378 |
+
b._reactRetry && b._reactRetry()
|
379 |
+
}
|
380 |
+
}
|
381 |
+
;
|
382 |
+
$RC("B:0", "S:0")
|
383 |
+
</script>
|
384 |
+
</body>
|
385 |
+
</html>
|
templates/chatgpt_context.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
templates/gpts_context.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
templates/login.html
ADDED
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<!DOCTYPE html>
|
2 |
+
<html lang="zh-CN">
|
3 |
+
<head>
|
4 |
+
<meta charset="UTF-8">
|
5 |
+
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
6 |
+
<title>登录</title>
|
7 |
+
<script src="https://cdn.tailwindcss.com"></script>
|
8 |
+
<style>
|
9 |
+
#popup {
|
10 |
+
display: none;
|
11 |
+
}
|
12 |
+
</style>
|
13 |
+
</head>
|
14 |
+
<body class="bg-gradient-to-br from-blue-300 via-indigo-300 to-purple-400 min-h-screen flex items-center justify-center">
|
15 |
+
<div class="bg-white p-8 rounded-xl shadow-2xl w-96 max-w-md">
|
16 |
+
<h2 class="text-3xl font-bold text-center text-gray-800 mb-4">登录</h2>
|
17 |
+
<button
|
18 |
+
type="button"
|
19 |
+
onclick="openPopup()"
|
20 |
+
class="w-full bg-gradient-to-r from-indigo-500 to-blue-500 text-white font-bold py-3 px-4 rounded-lg hover:opacity-90 transition duration-200 ease-in-out"
|
21 |
+
>
|
22 |
+
RefreshToken / AccessToken
|
23 |
+
</button>
|
24 |
+
<p class="text-xs text-gray-500 text-center mt-4">
|
25 |
+
<span class="font-semibold text-gray-800">非
|
26 |
+
<span class="font-bold text-indigo-600">RT</span>
|
27 |
+
与
|
28 |
+
<span class="font-bold text-indigo-600">AT</span>
|
29 |
+
的输入,将作为</span>
|
30 |
+
<span class="font-bold text-indigo-600">Seed</span>
|
31 |
+
<span class="font-semibold text-gray-800">随机抽取后台账号</span>
|
32 |
+
</p>
|
33 |
+
</div>
|
34 |
+
|
35 |
+
<div id="popup" class="fixed inset-0 bg-black bg-opacity-50 flex items-center justify-center">
|
36 |
+
<div class="bg-white p-8 rounded-xl shadow-lg max-w-lg w-full h-auto">
|
37 |
+
<!-- <h3 class="text-xl font-bold text-gray-800 mb-4 text-center">请输入您的 Token</h3>-->
|
38 |
+
<p class="font-semibold text-gray-800 text-center mb-1">直接点击
|
39 |
+
<span class="font-bold text-indigo-600">开始</span>
|
40 |
+
进入最近用过的账号</p>
|
41 |
+
<label for="popup-input"></label>
|
42 |
+
<textarea
|
43 |
+
id="popup-input"
|
44 |
+
name="token"
|
45 |
+
placeholder="RefreshToken / AccessToken / SeedToken"
|
46 |
+
class="w-full h-56 px-4 py-4 text-md rounded-md bg-gray-100 border-gray-300 focus:border-indigo-500 focus:bg-white focus:ring-2 focus:ring-indigo-200 text-gray-800 transition duration-200 ease-in-out mb-4"
|
47 |
+
style="resize: none;"
|
48 |
+
></textarea>
|
49 |
+
<div class="flex justify-center space-x-4">
|
50 |
+
<button
|
51 |
+
onclick="submitToken()"
|
52 |
+
class="bg-indigo-500 hover:bg-indigo-600 text-white font-bold py-2 px-4 rounded-lg transition duration-200 ease-in-out"
|
53 |
+
>
|
54 |
+
开 始
|
55 |
+
</button>
|
56 |
+
<button
|
57 |
+
onclick="closePopup()"
|
58 |
+
class="bg-gray-300 hover:bg-gray-400 text-gray-800 font-bold py-2 px-4 rounded-lg transition duration-200 ease-in-out"
|
59 |
+
>
|
60 |
+
取 消
|
61 |
+
</button>
|
62 |
+
</div>
|
63 |
+
</div>
|
64 |
+
</div>
|
65 |
+
|
66 |
+
<script>
// Show the token-entry modal.
function openPopup() {
    document.getElementById('popup').style.display = 'flex';
}

// Hide the token-entry modal.
function closePopup() {
    document.getElementById('popup').style.display = 'none';
}

// Redirect to "/" with the entered token as a query parameter.
// encodeURIComponent is required: refresh/access tokens may contain
// '+', '=', '&', '#' or pasted newlines, which would otherwise corrupt
// the query string and truncate the token server-side.
function submitToken() {
    var inputValue = document.getElementById('popup-input').value;
    window.location.href = '/?token=' + encodeURIComponent(inputValue);
    closePopup();
}
</script>
|
81 |
+
</body>
|
82 |
+
</html>
|
templates/tokens.html
ADDED
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<!DOCTYPE html>
|
2 |
+
<html lang="zh-CN">
|
3 |
+
<head>
|
4 |
+
<meta charset="UTF-8">
|
5 |
+
<meta content="width=device-width, initial-scale=1.0" name="viewport">
|
6 |
+
<title>Tokens 管理</title>
|
7 |
+
<script src="https://cdn.tailwindcss.com"></script>
|
8 |
+
<script>
document.addEventListener('DOMContentLoaded', () => {
    // api_prefix is injected server-side by the template engine;
    // the literal string "None" means no prefix is configured.
    const apiPrefix = "{{ api_prefix }}";
    const uploadForm = document.getElementById('uploadForm');
    const clearForm = document.getElementById('clearForm');
    const errorButton = document.getElementById('errorButton');

    // Build every endpoint from one base so the two branches cannot drift.
    const base = apiPrefix === "None" ? "" : `/${apiPrefix}`;
    uploadForm.action = `${base}/tokens/upload`;
    clearForm.action = `${base}/tokens/clear`;
    errorButton.dataset.api = `${base}/tokens/error`;

    errorButton.addEventListener('click', async () => {
        try {
            const response = await fetch(errorButton.dataset.api, {
                method: 'POST',
            });
            // Guard non-2xx replies: an error page body would otherwise make
            // response.json() throw as an unhandled rejection.
            if (!response.ok) {
                throw new Error(`HTTP ${response.status}`);
            }
            const result = await response.json();
            // Fall back to an empty list if the payload omits error_tokens.
            const errorTokens = result.error_tokens || [];

            const errorModal = document.getElementById('errorModal');
            const errorModalContent = document.getElementById('errorModalContent');

            // NOTE(review): tokens are injected via innerHTML; they originate
            // from the operator's own uploads, but HTML-escaping would be safer.
            errorModalContent.innerHTML = errorTokens.map(token => `<p>${token}</p>`).join('');
            errorModal.classList.remove('hidden');
        } catch (err) {
            alert('获取错误 Tokens 失败');
        }
    });

    document.getElementById('errorModalClose').addEventListener('click', () => {
        document.getElementById('errorModal').classList.add('hidden');
    });

    document.getElementById('errorModalCopy').addEventListener('click', () => {
        const errorModalContent = document.getElementById('errorModalContent');
        // Collapse the double newlines produced by the <p> wrappers.
        const textToCopy = errorModalContent.innerText.replace(/\n\n/g, '\n');
        navigator.clipboard.writeText(textToCopy).then(() => {
            alert('错误 Tokens 已复制到剪贴板');
        }).catch(() => {
            alert('复制失败,请手动复制');
        });
    });
});
</script>
|
54 |
+
</head>
|
55 |
+
<body class="bg-gradient-to-r from-blue-200 via-purple-200 to-pink-200 flex justify-center items-center min-h-screen">
|
56 |
+
<div class="bg-white p-10 rounded-lg shadow-2xl w-128 text-center">
|
57 |
+
<h1 class="text-4xl font-extrabold text-gray-900 mb-6">Tokens 管理</h1>
|
58 |
+
<p class="text-gray-600 mb-4">当前可用 Tokens 数量:<span class="text-blue-600">{{ tokens_count }}</span></p>
|
59 |
+
<form class="mb-2" id="uploadForm" method="post">
|
60 |
+
<textarea class="w-full p-4 mb-4 border border-gray-300 rounded-md focus:outline-none focus:ring-2 focus:ring-blue-400 resize-none" name="text" placeholder="一行一个Token,可以是 AccessToken 或 RefreshToken" rows="10"></textarea>
|
61 |
+
<p class="text-gray-600 mb-2">注:使用docker时如果挂载了data文件夹则重启后不需要再次上传</p>
|
62 |
+
<button class="w-full bg-blue-600 text-white py-3 rounded-md hover:bg-blue-700 transition duration-300 mb-2" type="submit">上传</button>
|
63 |
+
</form>
|
64 |
+
<button id="errorButton" class="w-full bg-yellow-600 text-white py-3 rounded-md hover:bg-yellow-700 transition duration-200 mt-2">查看错误Tokens</button>
|
65 |
+
<p class="text-gray-600 mt-2">点击清空,将会清空上传和错误的 Tokens</p>
|
66 |
+
<form id="clearForm" method="post">
|
67 |
+
<button class="w-full bg-red-600 text-white py-3 rounded-md hover:bg-red-700 transition duration-300" type="submit">清空Tokens</button>
|
68 |
+
</form>
|
69 |
+
</div>
|
70 |
+
|
71 |
+
<div id="errorModal" class="fixed inset-0 bg-gray-800 bg-opacity-75 flex justify-center items-center hidden">
|
72 |
+
<div class="bg-white p-6 rounded-lg shadow-lg w-150">
|
73 |
+
<h2 class="text-2xl font-bold mb-4">错误 Tokens</h2>
|
74 |
+
<div id="errorModalContent" class="list-disc list-inside text-left mb-4"></div>
|
75 |
+
<div class="flex justify-end space-x-4">
|
76 |
+
<button id="errorModalCopy" class="bg-green-600 text-white py-2 px-4 rounded-md hover:bg-green-700 transition duration-300">复制</button>
|
77 |
+
<button id="errorModalClose" class="bg-red-600 text-white py-2 px-4 rounded-md hover:bg-red-700 transition duration-300">关闭</button>
|
78 |
+
</div>
|
79 |
+
</div>
|
80 |
+
</div>
|
81 |
+
</body>
|
82 |
+
</html>
|
utils/Client.py
ADDED
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import random
|
2 |
+
|
3 |
+
from curl_cffi.requests import AsyncSession
|
4 |
+
|
5 |
+
|
6 |
+
class Client:
    """Thin async HTTP wrapper around two curl_cffi ``AsyncSession`` objects.

    ``session`` serves ordinary requests; ``session2`` is dedicated to
    streaming POSTs so a long-lived stream cannot tie up the main session.
    Both sessions share the same proxy, timeout, TLS-verify and browser
    impersonation settings.
    """

    def __init__(self, proxy=None, timeout=15, verify=True, impersonate='safari15_3'):
        # One proxy value is applied to both http and https traffic.
        self.proxies = {"http": proxy, "https": proxy}
        self.timeout = timeout
        self.verify = verify
        self.impersonate = impersonate

        session_kwargs = dict(
            proxies=self.proxies,
            timeout=self.timeout,
            impersonate=self.impersonate,
            verify=self.verify,
        )
        self.session = AsyncSession(**session_kwargs)
        self.session2 = AsyncSession(**session_kwargs)

    async def post(self, *args, **kwargs):
        """POST on the main session."""
        return await self.session.post(*args, **kwargs)

    async def post_stream(self, *args, headers=None, cookies=None, **kwargs):
        """POST on the streaming session, inheriting the main session's
        headers/cookies unless explicit ones are supplied."""
        if self.session:
            headers = headers or self.session.headers
            cookies = cookies or self.session.cookies
        return await self.session2.post(*args, headers=headers, cookies=cookies, **kwargs)

    async def get(self, *args, **kwargs):
        """GET on the main session."""
        return await self.session.get(*args, **kwargs)

    async def request(self, *args, **kwargs):
        """Arbitrary-method request on the main session."""
        return await self.session.request(*args, **kwargs)

    async def put(self, *args, **kwargs):
        """PUT on the main session."""
        return await self.session.put(*args, **kwargs)

    async def close(self):
        """Best-effort shutdown of both sessions.

        Errors are deliberately swallowed so cleanup never raises; each
        session attribute is removed once it has been closed.
        """
        for attr in ('session', 'session2'):
            if hasattr(self, attr):
                try:
                    await getattr(self, attr).close()
                    delattr(self, attr)
                except Exception:
                    pass
|
utils/Logger.py
ADDED
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import logging
|
2 |
+
|
3 |
+
logging.basicConfig(level=logging.INFO, format='%(asctime)s | %(levelname)s | %(message)s')


class Logger:
    """Static logging facade over the stdlib root logger.

    ``warning``/``error``/``debug`` wrap the message in ANSI color escapes so
    severities stand out on a terminal; ``info`` logs the plain message.
    """

    @staticmethod
    def info(message):
        logging.info(str(message))

    @staticmethod
    def warning(message):
        # Yellow text.
        logging.warning(f"\033[0;33m{str(message)}\033[0m")

    @staticmethod
    def error(message):
        # Red text, framed by dashed rules for visibility in the log stream.
        logging.error(f"\033[0;31m{'-' * 50}\n| {str(message)}\033[0m\n└{'-' * 80}")

    @staticmethod
    def debug(message):
        # Light gray text (only visible when the level is lowered to DEBUG).
        logging.debug(f"\033[0;37m{str(message)}\033[0m")


# Shared instance imported throughout the project.
logger = Logger()
|
utils/configs.py
ADDED
@@ -0,0 +1,106 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import ast
import os

from dotenv import load_dotenv

from utils.Logger import logger

# Pull environment variables from a local .env file before any config is read.
# NOTE(review): encoding="ascii" will raise on .env files containing non-ASCII
# characters — confirm this restriction is intended.
load_dotenv(encoding="ascii")
|
9 |
+
|
10 |
+
|
11 |
+
def is_true(x):
    """Normalize a configuration value to a boolean.

    Accepts bools as-is, common truthy strings (case-insensitive
    'true'/'1'/'t'/'y'/'yes'), and the integer 1. Anything else — including
    other ints, floats, and None — is treated as False.
    """
    if isinstance(x, bool):
        return x
    if isinstance(x, str):
        return x.lower() in ['true', '1', 't', 'y', 'yes']
    if isinstance(x, int):
        return x == 1
    return False
|
20 |
+
|
21 |
+
|
22 |
+
# ---------------------------------------------------------------------------
# Runtime configuration: every setting is read once from the environment at
# import time, then echoed to the log as a startup banner.
# ---------------------------------------------------------------------------

# --- Security / access control -------------------------------------------
api_prefix = os.getenv('API_PREFIX', None)
# Comma-separated credential list; spaces stripped so "a, b" works.
authorization = os.getenv('AUTHORIZATION', '').replace(' ', '')
chatgpt_base_url = os.getenv('CHATGPT_BASE_URL', 'https://chatgpt.com').replace(' ', '')
auth_key = os.getenv('AUTH_KEY', None)
x_sign = os.getenv('X_SIGN', None)

# 'ARK' + 'OSE_TOKEN_URL' concatenates to 'ARKOSE_TOKEN_URL'; presumably split
# to keep the literal keyword out of the source — TODO confirm intent.
ark0se_token_url = os.getenv('ARK' + 'OSE_TOKEN_URL', '').replace(' ', '')
if not ark0se_token_url:
    # Fallback: alternate spelling with a zero ('ARK0SE...').
    ark0se_token_url = os.getenv('ARK0SE_TOKEN_URL', None)
proxy_url = os.getenv('PROXY_URL', '').replace(' ', '')
sentinel_proxy_url = os.getenv('SENTINEL_PROXY_URL', None)
export_proxy_url = os.getenv('EXPORT_PROXY_URL', None)
file_host = os.getenv('FILE_HOST', None)
voice_host = os.getenv('VOICE_HOST', None)
# The *_str values below are Python-literal strings parsed with ast.literal_eval.
impersonate_list_str = os.getenv('IMPERSONATE', '[]')
user_agents_list_str = os.getenv('USER_AGENTS', '[]')
device_tuple_str = os.getenv('DEVICE_TUPLE', '()')
browser_tuple_str = os.getenv('BROWSER_TUPLE', '()')
platform_tuple_str = os.getenv('PLATFORM_TUPLE', '()')

cf_file_url = os.getenv('CF_FILE_URL', None)
turnstile_solver_url = os.getenv('TURNSTILE_SOLVER_URL', None)

# --- Functionality toggles (normalized through is_true) -------------------
history_disabled = is_true(os.getenv('HISTORY_DISABLED', True))
pow_difficulty = os.getenv('POW_DIFFICULTY', '000032')
retry_times = int(os.getenv('RETRY_TIMES', 3))
conversation_only = is_true(os.getenv('CONVERSATION_ONLY', False))
enable_limit = is_true(os.getenv('ENABLE_LIMIT', True))
upload_by_url = is_true(os.getenv('UPLOAD_BY_URL', False))
check_model = is_true(os.getenv('CHECK_MODEL', False))
scheduled_refresh = is_true(os.getenv('SCHEDULED_REFRESH', False))
random_token = is_true(os.getenv('RANDOM_TOKEN', True))
oai_language = os.getenv('OAI_LANGUAGE', 'zh-CN')

# --- Derived lists/tuples from the raw strings above ----------------------
authorization_list = authorization.split(',') if authorization else []
chatgpt_base_url_list = chatgpt_base_url.split(',') if chatgpt_base_url else []
ark0se_token_url_list = ark0se_token_url.split(',') if ark0se_token_url else []
proxy_url_list = proxy_url.split(',') if proxy_url else []
sentinel_proxy_url_list = sentinel_proxy_url.split(',') if sentinel_proxy_url else []
# literal_eval raises on malformed input, failing fast at startup.
impersonate_list = ast.literal_eval(impersonate_list_str)
user_agents_list = ast.literal_eval(user_agents_list_str)
device_tuple = ast.literal_eval(device_tuple_str)
browser_tuple = ast.literal_eval(browser_tuple_str)
platform_tuple = ast.literal_eval(platform_tuple_str)

# --- Gateway toggles ------------------------------------------------------
enable_gateway = is_true(os.getenv('ENABLE_GATEWAY', False))
auto_seed = is_true(os.getenv('AUTO_SEED', True))
force_no_history = is_true(os.getenv('FORCE_NO_HISTORY', False))
# NOTE(review): NO_SENTINEL is read here but not echoed in the banner below.
no_sentinel = is_true(os.getenv('NO_SENTINEL', False))

# Version string for the startup banner (file expected in the working dir).
with open('version.txt') as f:
    version = f.read().strip()

# --- Startup banner: echo the effective configuration ---------------------
logger.info("-" * 60)
logger.info(f"Chat2Api {version} | https://github.com/lanqian528/chat2api")
logger.info("-" * 60)
logger.info("Environment variables:")
logger.info("------------------------- Security -------------------------")
logger.info("API_PREFIX: " + str(api_prefix))
logger.info("AUTHORIZATION: " + str(authorization_list))
logger.info("AUTH_KEY: " + str(auth_key))
logger.info("------------------------- Request --------------------------")
logger.info("CHATGPT_BASE_URL: " + str(chatgpt_base_url_list))
logger.info("PROXY_URL: " + str(proxy_url_list))
logger.info("EXPORT_PROXY_URL: " + str(export_proxy_url))
logger.info("FILE_HOST: " + str(file_host))
logger.info("VOICE_HOST: " + str(voice_host))
logger.info("IMPERSONATE: " + str(impersonate_list))
logger.info("USER_AGENTS: " + str(user_agents_list))
logger.info("---------------------- Functionality -----------------------")
logger.info("HISTORY_DISABLED: " + str(history_disabled))
logger.info("POW_DIFFICULTY: " + str(pow_difficulty))
logger.info("RETRY_TIMES: " + str(retry_times))
logger.info("CONVERSATION_ONLY: " + str(conversation_only))
logger.info("ENABLE_LIMIT: " + str(enable_limit))
logger.info("UPLOAD_BY_URL: " + str(upload_by_url))
logger.info("CHECK_MODEL: " + str(check_model))
logger.info("SCHEDULED_REFRESH: " + str(scheduled_refresh))
logger.info("RANDOM_TOKEN: " + str(random_token))
logger.info("OAI_LANGUAGE: " + str(oai_language))
logger.info("------------------------- Gateway --------------------------")
logger.info("ENABLE_GATEWAY: " + str(enable_gateway))
logger.info("AUTO_SEED: " + str(auto_seed))
logger.info("FORCE_NO_HISTORY: " + str(force_no_history))
logger.info("-" * 60)
|
utils/globals.py
ADDED
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
import os
|
3 |
+
|
4 |
+
import utils.configs as configs
|
5 |
+
from utils.Logger import logger
|
6 |
+
|
7 |
+
DATA_FOLDER = "data"
# Persisted state files, all under DATA_FOLDER.
TOKENS_FILE = os.path.join(DATA_FOLDER, "token.txt")
REFRESH_MAP_FILE = os.path.join(DATA_FOLDER, "refresh_map.json")
ERROR_TOKENS_FILE = os.path.join(DATA_FOLDER, "error_token.txt")
WSS_MAP_FILE = os.path.join(DATA_FOLDER, "wss_map.json")
FP_FILE = os.path.join(DATA_FOLDER, "fp_map.json")
SEED_MAP_FILE = os.path.join(DATA_FOLDER, "seed_map.json")
CONVERSATION_MAP_FILE = os.path.join(DATA_FOLDER, "conversation_map.json")

# Mutable module-level state shared across the application.
count = 0

# Browser fingerprints to impersonate; overridable via configs.impersonate_list.
impersonate_list = [
    "chrome99",
    "chrome100",
    "chrome101",
    "chrome104",
    "chrome107",
    "chrome110",
    "chrome116",
    "chrome119",
    "chrome120",
    "chrome123",
    "edge99",
    "edge101",
] if not configs.impersonate_list else configs.impersonate_list


def _load_json_map(path, encoding=None):
    """Best-effort load of a JSON mapping from *path*.

    Returns {} when the file is missing, unreadable, or contains invalid
    JSON — state files are disposable caches, so never crash at import time.
    """
    try:
        with open(path, "r", encoding=encoding) as f:
            return json.load(f)
    except Exception:
        return {}


def _load_token_lines(path):
    """Return stripped, non-empty, non-comment lines from *path*.

    Creates an empty file when it does not exist yet, so later appends work.
    The '#' comment check is applied to the raw (unstripped) line.
    """
    if not os.path.exists(path):
        with open(path, "w", encoding="utf-8"):
            pass
        return []
    with open(path, "r", encoding="utf-8") as f:
        return [line.strip() for line in f if line.strip() and not line.startswith("#")]


# Make sure the data directory exists before touching any file inside it.
if not os.path.exists(DATA_FOLDER):
    os.makedirs(DATA_FOLDER)

refresh_map = _load_json_map(REFRESH_MAP_FILE)
wss_map = _load_json_map(WSS_MAP_FILE)
fp_map = _load_json_map(FP_FILE, encoding="utf-8")
seed_map = _load_json_map(SEED_MAP_FILE)
conversation_map = _load_json_map(CONVERSATION_MAP_FILE)

token_list = _load_token_lines(TOKENS_FILE)
error_token_list = _load_token_lines(ERROR_TOKENS_FILE)

if token_list:
    logger.info(f"Token list count: {len(token_list)}, Error token list count: {len(error_token_list)}")
    logger.info("-" * 60)
utils/kv_utils.py
ADDED
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
def set_value_for_key(data, target_key, new_value):
    """Recursively walk nested dicts/lists in *data*, overwriting every value
    stored under *target_key* with *new_value* (in place; returns None).

    A matching key's old value is replaced wholesale — its contents are not
    descended into.
    """
    if isinstance(data, dict):
        for key in data:
            if key == target_key:
                data[key] = new_value
            else:
                set_value_for_key(data[key], target_key, new_value)
    elif isinstance(data, list):
        for element in data:
            set_value_for_key(element, target_key, new_value)
|
utils/retry.py
ADDED
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from fastapi import HTTPException
|
2 |
+
|
3 |
+
from utils.Logger import logger
|
4 |
+
from utils.configs import retry_times
|
5 |
+
|
6 |
+
|
7 |
+
async def async_retry(func, *args, max_retries=retry_times, **kwargs):
    """Await *func* up to ``max_retries + 1`` times, retrying on HTTPException.

    Non-HTTPException errors propagate immediately. On the final failed
    attempt the error is logged and re-raised as a fresh HTTPException
    (a 500 is masked as a generic "Server error").
    """
    attempt = 0
    while attempt <= max_retries:
        try:
            return await func(*args, **kwargs)
        except HTTPException as e:
            if attempt == max_retries:
                logger.error(f"Throw an exception {e.status_code}, {e.detail}")
                if e.status_code == 500:
                    raise HTTPException(status_code=500, detail="Server error")
                raise HTTPException(status_code=e.status_code, detail=e.detail)
            logger.info(f"Retry {attempt + 1} status code {e.status_code}, {e.detail}. Retrying...")
            attempt += 1
|
19 |
+
|
20 |
+
|
21 |
+
def retry(func, *args, max_retries=retry_times, **kwargs):
    """Synchronous twin of async_retry: call *func* up to ``max_retries + 1``
    times, retrying on HTTPException and re-raising on the final attempt
    (500s masked as a generic "Server error"). Other exceptions propagate.
    """
    attempt = 0
    while attempt <= max_retries:
        try:
            return func(*args, **kwargs)
        except HTTPException as e:
            if attempt == max_retries:
                logger.error(f"Throw an exception {e.status_code}, {e.detail}")
                if e.status_code == 500:
                    raise HTTPException(status_code=500, detail="Server error")
                raise HTTPException(status_code=e.status_code, detail=e.detail)
            logger.error(f"Retry {attempt + 1} status code {e.status_code}, {e.detail}. Retrying...")
            attempt += 1
|
version.txt
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
v1.7.6-beta5
|