Spaces:
Running
Running
Commit
·
5693654
0
Parent(s):
init
Browse files- .env.example +42 -0
- .github/workflows/docker-build.yml +61 -0
- .github/workflows/security-scan.yml +43 -0
- .github/workflows/sync.yml +60 -0
- .gitignore +20 -0
- .python-version +1 -0
- Dockerfile +27 -0
- README.md +279 -0
- app/clients/__init__.py +5 -0
- app/clients/base_client.py +54 -0
- app/clients/claude_client.py +142 -0
- app/clients/deepseek_client.py +133 -0
- app/deepclaude/__init__.py +0 -0
- app/deepclaude/deepclaude.py +268 -0
- app/main.py +160 -0
- app/utils/auth.py +47 -0
- app/utils/logger.py +70 -0
- pyproject.toml +14 -0
- uv.lock +0 -0
.env.example
ADDED
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# 客户端请求时允许通过请求的 API KEY,无需在当前环境变量当中手动添加 Bearer,目前只支持一个,未来可以升级为数组/toml/yaml 或专门的管理工具
|
2 |
+
ALLOW_API_KEY=your_api_key
|
3 |
+
|
4 |
+
# 服务端跨域配置
|
5 |
+
# 允许访问的域名,多个域名使用逗号分隔(中间不能有空格),例如:http://localhost:3000,https://chat.example.com
|
6 |
+
# 如果允许所有域名访问,则填写 *
|
7 |
+
ALLOW_ORIGINS=*
|
8 |
+
|
9 |
+
# DeepSeek API KEY,默认为 DeepSeek 官方 API,只支持 r1 系列模型(包括基于 llama 和 qwen 的密集模型,1.5b、7b、8b、14b、32b、70b、671b)
|
10 |
+
# 除了采用官网的 DeepSeek r1 外,还推荐以下:
|
11 |
+
# 1.SiliconFlow 的 deepseek-ai/DeepSeek-R1 获取 API KEY 的链接:https://cloud.siliconflow.cn/i/RXikvHE2 (点击此链接可以获得到 2000 万免费 tokens)
|
12 |
+
# 2.Groq 托管的 deepseek-r1-distill-llama-70b 获取 API KEY 的链接:https://console.groq.com/docs/models
|
13 |
+
# 3.本地 Ollama 运行的 DeepSeek r1 , 可以根据自己电脑的内存和显存大小而定,推荐 7b、8b、14b 最佳,获取链接:https://ollama.com
|
14 |
+
DEEPSEEK_API_KEY=your_deepseek_api_key
|
15 |
+
DEEPSEEK_API_URL=https://api.deepseek.com/v1/chat/completions #如果是siliconflow,则使用 https://api.siliconflow.cn/v1/chat/completions
|
16 |
+
DEEPSEEK_MODEL=deepseek-reasoner #如果是siliconflow,则使用 deepseek-ai/DeepSeek-R1
|
17 |
+
|
18 |
+
# DeepSeek推理过程格式配置
|
19 |
+
# 该变量用于区别返回体中推理过程的格式
|
20 |
+
# 目前支持两种格式:
|
21 |
+
# 1. Origin_Reasoning (支持 reasoning_content 字段返回) (DeepSeek官方 deepseek-reasoner 模型、 SiliconFlow 的 deepseek-ai/DeepSeek-R1 模型、火山云的 DeepSeek R1 模型均支持)
|
22 |
+
# 2. <think></think> 标签格式(通常是通过 deepseek r1 蒸馏过的模型采用这种格式,包括各个平台托管的基于 llama 和 qwen 的密集模型,比如 7b、8b、14b、32b、70b 的版本)
|
23 |
+
# 填写true表示使用 Origin_Reasoning 格式,填写false表示使用 <think></think> 标签格式
|
24 |
+
IS_ORIGIN_REASONING=true
|
25 |
+
|
26 |
+
# Claude API KEY,默认为 Claude 官方 API,只推荐 Claude 3.5 Sonnet 模型,不推荐其他模型
|
27 |
+
CLAUDE_API_KEY=your_claude_api_key
|
28 |
+
CLAUDE_MODEL=claude-3-5-sonnet-20241022
|
29 |
+
|
30 |
+
# Claude Provider
|
31 |
+
# 若使用官方则填 anthropic
|
32 |
+
# 若使用中转平台如 OpenRouter 或国内基于 OneAPI 的中转平台则填oneapi
|
33 |
+
CLAUDE_PROVIDER=anthropic
|
34 |
+
|
35 |
+
# 如果使用 OpenRouter,这里填写 OpenRouter 的 API URL https://openrouter.ai/api/v1/chat/completions,
|
36 |
+
# 如果使用中转平台则填写相应的 API URL: 例如 https://api.oneapi.com/v1/chat/completions
|
37 |
+
# 默认为 Anthropic 的 API URL https://api.anthropic.com/v1/messages
|
38 |
+
CLAUDE_API_URL=https://api.anthropic.com/v1/messages
|
39 |
+
|
40 |
+
# 日志配置
|
41 |
+
# 可选值:DEBUG, INFO, WARNING, ERROR, CRITICAL
|
42 |
+
LOG_LEVEL=INFO
|
.github/workflows/docker-build.yml
ADDED
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
name: Docker Build
|
2 |
+
|
3 |
+
on:
|
4 |
+
push:
|
5 |
+
branches: [ "main" ]
|
6 |
+
workflow_dispatch:
|
7 |
+
|
8 |
+
jobs:
|
9 |
+
check-repository:
|
10 |
+
runs-on: ubuntu-latest
|
11 |
+
outputs:
|
12 |
+
is_original: ${{ steps.check.outputs.is_original }}
|
13 |
+
steps:
|
14 |
+
- id: check
|
15 |
+
run: |
|
16 |
+
if [ "${{ github.repository }}" = "ErlichLiu/DeepClaude" ]; then
|
17 |
+
echo "is_original=true" >> $GITHUB_OUTPUT
|
18 |
+
else
|
19 |
+
echo "is_original=false" >> $GITHUB_OUTPUT
|
20 |
+
fi
|
21 |
+
|
22 |
+
build:
|
23 |
+
needs: check-repository
|
24 |
+
if: needs.check-repository.outputs.is_original == 'true'
|
25 |
+
runs-on: ubuntu-latest
|
26 |
+
permissions:
|
27 |
+
packages: write
|
28 |
+
contents: read
|
29 |
+
|
30 |
+
steps:
|
31 |
+
- uses: actions/checkout@v4
|
32 |
+
|
33 |
+
- name: Login to GitHub Container Registry
|
34 |
+
uses: docker/login-action@v3
|
35 |
+
with:
|
36 |
+
registry: ghcr.io
|
37 |
+
username: ${{ github.repository_owner }}
|
38 |
+
password: ${{ secrets.GITHUB_TOKEN }}
|
39 |
+
|
40 |
+
- name: Set lowercase variables
|
41 |
+
run: |
|
42 |
+
OWNER_LOWER=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]')
|
43 |
+
REPO_NAME_LOWER=$(echo "${{ github.event.repository.name }}" | tr '[:upper:]' '[:lower:]')
|
44 |
+
echo "OWNER_LOWER=$OWNER_LOWER" >> $GITHUB_ENV
|
45 |
+
echo "REPO_NAME_LOWER=$REPO_NAME_LOWER" >> $GITHUB_ENV
|
46 |
+
|
47 |
+
- name: Set up Docker Buildx
|
48 |
+
uses: docker/setup-buildx-action@v3
|
49 |
+
with:
|
50 |
+
driver: docker-container
|
51 |
+
|
52 |
+
- name: Build and push Docker image
|
53 |
+
uses: docker/build-push-action@v5
|
54 |
+
with:
|
55 |
+
context: .
|
56 |
+
file: ./Dockerfile
|
57 |
+
push: true
|
58 |
+
platforms: linux/amd64,linux/arm64
|
59 |
+
tags: |
|
60 |
+
ghcr.io/${{ env.OWNER_LOWER }}/${{ env.REPO_NAME_LOWER }}:latest
|
61 |
+
ghcr.io/${{ env.OWNER_LOWER }}/${{ env.REPO_NAME_LOWER }}:${{ github.sha }}
|
.github/workflows/security-scan.yml
ADDED
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
name: Security Scan
|
2 |
+
|
3 |
+
on:
|
4 |
+
schedule:
|
5 |
+
- cron: '0 0 * * 0' # 每周运行一次
|
6 |
+
workflow_dispatch:
|
7 |
+
|
8 |
+
jobs:
|
9 |
+
check-repository:
|
10 |
+
runs-on: ubuntu-latest
|
11 |
+
outputs:
|
12 |
+
is_original: ${{ steps.check.outputs.is_original }}
|
13 |
+
steps:
|
14 |
+
- id: check
|
15 |
+
run: |
|
16 |
+
if [ "${{ github.repository }}" = "ErlichLiu/DeepClaude" ]; then
|
17 |
+
echo "is_original=true" >> $GITHUB_OUTPUT
|
18 |
+
else
|
19 |
+
echo "is_original=false" >> $GITHUB_OUTPUT
|
20 |
+
fi
|
21 |
+
|
22 |
+
scan:
|
23 |
+
needs: check-repository
|
24 |
+
if: needs.check-repository.outputs.is_original == 'true'
|
25 |
+
runs-on: ubuntu-latest
|
26 |
+
steps:
|
27 |
+
- uses: actions/checkout@v4
|
28 |
+
|
29 |
+
- name: Set lowercase variables
|
30 |
+
run: |
|
31 |
+
OWNER_LOWER=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]')
|
32 |
+
REPO_NAME_LOWER=$(echo "${{ github.event.repository.name }}" | tr '[:upper:]' '[:lower:]')
|
33 |
+
echo "OWNER_LOWER=$OWNER_LOWER" >> $GITHUB_ENV
|
34 |
+
echo "REPO_NAME_LOWER=$REPO_NAME_LOWER" >> $GITHUB_ENV
|
35 |
+
|
36 |
+
- name: Run Trivy vulnerability scanner
|
37 |
+
uses: aquasecurity/trivy-action@master
|
38 |
+
with:
|
39 |
+
image-ref: ghcr.io/${{ env.OWNER_LOWER }}/${{ env.REPO_NAME_LOWER }}:latest
|
40 |
+
format: 'table'
|
41 |
+
exit-code: '1'
|
42 |
+
ignore-unfixed: true
|
43 |
+
severity: 'CRITICAL'
|
.github/workflows/sync.yml
ADDED
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
name: Upstream Sync
|
2 |
+
|
3 |
+
on:
|
4 |
+
schedule:
|
5 |
+
- cron: "0 0 * * *"
|
6 |
+
workflow_dispatch:
|
7 |
+
|
8 |
+
jobs:
|
9 |
+
# 检查是否为 fork 仓库
|
10 |
+
check-repository:
|
11 |
+
name: Check Repository Type
|
12 |
+
runs-on: ubuntu-latest
|
13 |
+
outputs:
|
14 |
+
is_fork: ${{ steps.check.outputs.is_fork }}
|
15 |
+
steps:
|
16 |
+
- id: check
|
17 |
+
run: |
|
18 |
+
if [ "${{ github.repository }}" != "ErlichLiu/DeepClaude" ]; then
|
19 |
+
echo "is_fork=true" >> $GITHUB_OUTPUT
|
20 |
+
else
|
21 |
+
echo "is_fork=false" >> $GITHUB_OUTPUT
|
22 |
+
fi
|
23 |
+
|
24 |
+
# 同步上游更改
|
25 |
+
sync:
|
26 |
+
needs: check-repository
|
27 |
+
if: needs.check-repository.outputs.is_fork == 'true'
|
28 |
+
name: Sync Latest From Upstream
|
29 |
+
runs-on: ubuntu-latest
|
30 |
+
steps:
|
31 |
+
# 标准签出
|
32 |
+
- name: Checkout target repo
|
33 |
+
uses: actions/checkout@v4
|
34 |
+
# 如果上游仓库有对.github/workflows/下的文件进行变更,则需要使用有workflow权限的token
|
35 |
+
# with:
|
36 |
+
# token: ${{ secrets.ACTION_TOKEN }}
|
37 |
+
|
38 |
+
# 获取分支名(区分 PR 和普通提交场景)
|
39 |
+
- name: Get branch name (merge)
|
40 |
+
if: github.event_name != 'pull_request'
|
41 |
+
shell: bash
|
42 |
+
run: echo "BRANCH_NAME=$(echo ${GITHUB_REF#refs/heads/} | tr / -)" >> $GITHUB_ENV
|
43 |
+
|
44 |
+
- name: Get branch name (pull request)
|
45 |
+
if: github.event_name == 'pull_request'
|
46 |
+
shell: bash
|
47 |
+
run: echo "BRANCH_NAME=$(echo ${GITHUB_HEAD_REF} | tr / -)" >> $GITHUB_ENV
|
48 |
+
|
49 |
+
# 运行同步动作
|
50 |
+
- name: Sync upstream changes
|
51 |
+
id: sync
|
52 |
+
uses: aormsby/[email protected]
|
53 |
+
with:
|
54 |
+
upstream_sync_repo: ErlichLiu/DeepClaude
|
55 |
+
upstream_sync_branch: ${{ env.BRANCH_NAME }}
|
56 |
+
target_sync_branch: ${{ env.BRANCH_NAME }}
|
57 |
+
target_repo_token: ${{ secrets.GITHUB_TOKEN }}
|
58 |
+
upstream_pull_args: --allow-unrelated-histories --no-edit
|
59 |
+
shallow_since: '1 days ago'
|
60 |
+
test_mode: false
|
.gitignore
ADDED
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Python-generated files
|
2 |
+
__pycache__/
|
3 |
+
*.py[oc]
|
4 |
+
build/
|
5 |
+
dist/
|
6 |
+
wheels/
|
7 |
+
*.egg-info
|
8 |
+
|
9 |
+
# Environment
|
10 |
+
.env
|
11 |
+
.env.*
|
12 |
+
!.env.example
|
13 |
+
|
14 |
+
# Virtual environments
|
15 |
+
.venv
|
16 |
+
|
17 |
+
# Windsurf
|
18 |
+
.windsurfrules
|
19 |
+
|
20 |
+
docker-compose.yml
|
.python-version
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
3.11
|
Dockerfile
ADDED
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# 使用 Python 3.11 slim 版本作为基础镜像
|
2 |
+
FROM python:3.11-slim
|
3 |
+
|
4 |
+
# 设置工作目录
|
5 |
+
WORKDIR /app
|
6 |
+
|
7 |
+
# 设置环境变量
|
8 |
+
ENV PYTHONUNBUFFERED=1 \
|
9 |
+
PYTHONDONTWRITEBYTECODE=1
|
10 |
+
|
11 |
+
# 安装依赖
|
12 |
+
RUN pip install --no-cache-dir \
|
13 |
+
aiohttp==3.11.11 \
|
14 |
+
colorlog==6.9.0 \
|
15 |
+
fastapi==0.115.8 \
|
16 |
+
python-dotenv==1.0.1 \
|
17 |
+
tiktoken==0.8.0 \
|
18 |
+
"uvicorn[standard]"
|
19 |
+
|
20 |
+
# 复制项目文件
|
21 |
+
COPY ./app ./app
|
22 |
+
|
23 |
+
# 暴露端口
|
24 |
+
EXPOSE 8000
|
25 |
+
|
26 |
+
# 启动命令
|
27 |
+
CMD ["python", "-m", "uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"]
|
README.md
ADDED
@@ -0,0 +1,279 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<div>
|
2 |
+
<h1>DeepClaude 🐬🧠 - OpenAI Compatible</h1>
|
3 |
+
|
4 |
+
<a href="https://github.com/getasterisk/deepclaude"> Inspiration from getasterisk/deepclaude</a>
|
5 |
+
|
6 |
+
[](#)
|
7 |
+
[](https://openai.com)
|
8 |
+
|
9 |
+
</div>
|
10 |
+
|
11 |
+
<div>
|
12 |
+
<h3 style="color: #FF9909"> 特别说明:对于不太会部署,只是希望使用上最强 DeepClaude 组合的朋友,请直接联系 Erlich(微信:geekthings)购买按量付费的 API 即可,国内可以直接访问 </h3>
|
13 |
+
</div>
|
14 |
+
|
15 |
+
---
|
16 |
+
|
17 |
+
<details>
|
18 |
+
<summary><strong>更新日志:</strong></summary>
|
19 |
+
<div>
|
20 |
+
2025-02-08.2: 支持非流式请求,支持 OpenAI 兼容的 models 接口返回。(⚠️ 当前暂未实现正确的 tokens 消耗统计,稍后更新)
|
21 |
+
|
22 |
+
2025-02-08.1: 添加 Github Actions,支持 fork 自动同步、支持自动构建 Docker 最新镜像、支持 docker-compose 部署
|
23 |
+
|
24 |
+
2025-02-07.2: 修复 Claude temperature 参数可能会超过范围导致的请求失败的 bug
|
25 |
+
|
26 |
+
2025-02-07.1: 支持 Claude temperature 等参数;添加更详细的 .env.example 说明
|
27 |
+
|
28 |
+
2025-02-06.1:修复非原生推理模型无法获得到推理内容的 bug
|
29 |
+
|
30 |
+
2025-02-05.1: 支持通过环境变量配置是否是原生支持推理字段的模型,满血版本通常支持
|
31 |
+
|
32 |
+
2025-02-04.2: 支持跨域配置,可在 .env 中配置
|
33 |
+
|
34 |
+
2025-02-04.1: 支持 Openrouter 以及 OneAPI 等中转服务商作为 Claude 部分的供应商
|
35 |
+
|
36 |
+
2025-02-03.3: 支持 OpenRouter 作为 Claude 的供应商,详见 .env.example 说明
|
37 |
+
|
38 |
+
2025-02-03.2: 由于 deepseek r1 在某种程度上已经开启了一个规范,所以我们也遵循推理标注的这种规范,更好适配支持的更好的 Cherry Studio 等软件。
|
39 |
+
|
40 |
+
2025-02-03.1: Siliconflow 的 DeepSeek R1 返回结构变更,支持新的返回结构
|
41 |
+
|
42 |
+
</div>
|
43 |
+
</details>
|
44 |
+
|
45 |
+
# Table of Contents
|
46 |
+
|
47 |
+
- [Table of Contents](#table-of-contents)
|
48 |
+
- [Introduction](#introduction)
|
49 |
+
- [Implementation](#implementation)
|
50 |
+
- [How to run](#how-to-run)
|
51 |
+
- [1. 获得运行所需的 API](#1-获得运行所需的-api)
|
52 |
+
- [2. 开始运行(本地运行)](#2-开始运行本地运行)
|
53 |
+
- [Deployment](#deployment)
|
54 |
+
- [Railway 一键部署(推荐)](#railway-一键部署推荐)
|
55 |
+
- [Zeabur 一键部署(一定概率下会遇到 Domain 生成问题,需要重新创建 project 部署)](#zeabur-一键部署一定概率下会遇到-domain-生成问题需要重新创建-project-部署)
|
56 |
+
- [使用 docker-compose 部署(Docker 镜像将随着 main 分支自动更新到最新)](#使用-docker-compose-部署docker-镜像将随着-main-分支自动更新到最新)
|
57 |
+
- [Docker 部署(自行 Build)](#docker-部署自行-build)
|
58 |
+
- [Automatic fork sync](#automatic-fork-sync)
|
59 |
+
- [Technology Stack](#technology-stack)
|
60 |
+
- [Star History](#star-history)
|
61 |
+
- [Buy me a coffee](#buy-me-a-coffee)
|
62 |
+
- [About Me](#about-me)
|
63 |
+
|
64 |
+
# Introduction
|
65 |
+
最近 DeepSeek 推出了 [DeepSeek R1 模型](https://platform.deepseek.com),在推理能力上已经达到了第一梯队。但是 DeepSeek R1 在一些日常任务的输出上可能仍然无法匹敌 Claude 3.5 Sonnet。Aider 团队最近有一篇研究,表示通过[采用 DeepSeek R1 + Claude 3.5 Sonnet 可以实现最好的效果](https://aider.chat/2025/01/24/r1-sonnet.html)。
|
66 |
+
|
67 |
+
<img src="https://img.erlich.fun/personal-blog/uPic/heiQYX.png" alt="deepseek r1 and sonnet benchmark" style="width: 400px;"/>
|
68 |
+
|
69 |
+
> **R1 as architect with Sonnet as editor has set a new SOTA of 64.0%** on the [aider polyglot benchmark](https://aider.chat/2024/12/21/polyglot.html). They achieve this at **14X less cost** compared to the previous o1 SOTA result.
|
70 |
+
|
71 |
+
并且 Aider 还 [开源了 Demo](https://github.com/getasterisk/deepclaude),你可以直接在他们的项目上进行在线体验。
|
72 |
+
|
73 |
+
|
74 |
+
|
75 |
+
本项目受到该项目的启发,通过 fastAPI 完全重写,并支持 OpenAI 兼容格式,支持 DeepSeek 官方 API 以及第三方托管的 API。
|
76 |
+
|
77 |
+
用户可以自行运行在自己的服务器,并对外提供开放 API 接口,接入 [OneAPI](https://github.com/songquanpeng/one-api) 等实现统一分发(token 消耗部分仍需开发)。也可以接入你的日常 ChatBox 软件以及 接入 [Cursor](https://www.cursor.com/) 等软件实现更好的编程效果(Claude 的流式输出+ Tool use 仍需开发)。
|
78 |
+
|
79 |
+
# Implementation
|
80 |
+
⚠️Notice: 目前只支持流式输出模式(因为这是效率最高的模式,不会浪费时间);接下来会实现第一段 DeepSeek 推理阶段流式,Claude 输出非流式的模式(出于节省时间的考虑)。
|
81 |
+
|
82 |
+

|
83 |
+
|
84 |
+
# How to run
|
85 |
+
|
86 |
+
> 项目支持本地运行和服务器运行,本地运行可与 Ollama 搭配,实现用本地的 DeepSeek R1 与 Claude 组合输出
|
87 |
+
|
88 |
+
|
89 |
+
## 1. 获得运行所需的 API
|
90 |
+
|
91 |
+
1. 获取 DeepSeek API,因为最近 DeepSeek 还在遭受攻击,所以经常无法使用,推荐使用 Siliconflow 的效果更好(也可以本地 Ollama 的): https://cloud.siliconflow.cn/i/RXikvHE2 (点击此链接可以获得到 2000 万免费 tokens)
|
92 |
+
2. 获取 Claude 的 API KEY (目前还没有做中转模式,以及对 Google 和 AWS 托管的版本的兼容支持,欢迎 PR):https://console.anthropic.com
|
93 |
+
|
94 |
+
## 2. 开始运行(本地运行)
|
95 |
+
Step 1. 克隆本项目到适合的文件夹并进入项目
|
96 |
+
|
97 |
+
```bash
|
98 |
+
git clone [email protected]:ErlichLiu/DeepClaude.git
|
99 |
+
cd DeepClaude
|
100 |
+
```
|
101 |
+
|
102 |
+
Step 2. 通过 uv 安装依赖(如果你还没有安装 uv,请看下方注解)
|
103 |
+
|
104 |
+
```bash
|
105 |
+
# 通过 uv 在本地创建虚拟环境,并安装依赖
|
106 |
+
uv sync
|
107 |
+
# macOS 激活虚拟环境
|
108 |
+
source .venv/bin/activate
|
109 |
+
# Windows 激活虚拟环境
|
110 |
+
.venv\Scripts\activate
|
111 |
+
```
|
112 |
+
|
113 |
+
Step 3. 配置环境变量
|
114 |
+
|
115 |
+
```bash
|
116 |
+
# 复制 .env 环境变量到本地
|
117 |
+
cp .env.example .env
|
118 |
+
```
|
119 |
+
|
120 |
+
Step 4. 按照环境变量当中的注释依次填写配置信息(在此步骤可以配置 Ollama)
|
121 |
+
|
122 |
+
Step 5. 本地运行程序
|
123 |
+
|
124 |
+
```bash
|
125 |
+
# 本地运行
|
126 |
+
uvicorn app.main:app
|
127 |
+
```
|
128 |
+
|
129 |
+
Step 6. 配置程序到你的 Chatbox(推荐 [NextChat](https://nextchat.dev/)、[ChatBox](https://chatboxai.app/zh)、[LobeChat](https://lobechat.com/))
|
130 |
+
|
131 |
+
```bash
|
132 |
+
# 通常 baseUrl 为:http://127.0.0.1:8000/v1
|
133 |
+
```
|
134 |
+
|
135 |
+
**注:本项目采用 uv 作为包管理器,这是一个更快速更现代的管理方式,用于替代 pip,你可以[在此了解更多](https://docs.astral.sh/uv/)**
|
136 |
+
|
137 |
+
|
138 |
+
|
139 |
+
# Deployment
|
140 |
+
|
141 |
+
> 项目支持 Docker 服务器部署,可自行调用接入常用的 Chatbox,也可以作为渠道之一,将其视为一个特殊的 `DeepClaude`模型接入到 [OneAPI](https://github.com/songquanpeng/one-api) 等产品使用。
|
142 |
+
|
143 |
+
## Railway 一键部署(推荐)
|
144 |
+
<details>
|
145 |
+
<summary><strong>一键部署到 Railway</strong></summary>
|
146 |
+
|
147 |
+
<div>
|
148 |
+
1. 首先 fork 一份代码。
|
149 |
+
|
150 |
+
2. 点击打开 Railway 主页:https://railway.com?referralCode=RNTGCA
|
151 |
+
|
152 |
+
3. 点击 `Deploy a new project`
|
153 |
+

|
154 |
+
|
155 |
+
4. 点击 `Deploy from GitHub repo`
|
156 |
+

|
157 |
+
|
158 |
+
5. 点击 `Login with GitHub`
|
159 |
+

|
160 |
+
|
161 |
+
6. 选择升级,选择只需 5 美金的 Hobby Plan 即可
|
162 |
+

|
163 |
+

|
164 |
+
|
165 |
+
1. 点击 `Create a New Project`
|
166 |
+

|
167 |
+
|
168 |
+
1. 继续选择 `Deploy from GitHub repo`
|
169 |
+

|
170 |
+
|
171 |
+
1. 输入框内搜索`DeepClaude`,选中后点击。
|
172 |
+

|
173 |
+
|
174 |
+
1. 选择`Variable`,并点击`New Variable` 按钮,按照环境变量内的键值对进行填写
|
175 |
+

|
176 |
+
|
177 |
+
1. 填写完成后重新点击 `Deploy` 按钮,等待数秒后即可完成部署
|
178 |
+

|
179 |
+
|
180 |
+
1. 部署完成后,点击 `Settings` 按钮,然后向下查看到 `Networking` 区域,然后选择 `Generate Domain`,并输入 `8000` 作为端口号
|
181 |
+

|
182 |
+

|
183 |
+

|
184 |
+
|
185 |
+
1. 接下来就可以在你喜欢的 Chatbox 内配置使用或作为 API 使用了
|
186 |
+

|
187 |
+
|
188 |
+
</div>
|
189 |
+
</details>
|
190 |
+
|
191 |
+
## Zeabur 一键部署(一定概率下会遇到 Domain 生成问题,需要重新创建 project 部署)
|
192 |
+
<details>
|
193 |
+
<summary><strong>一键部署到 Zeabur</strong></summary>
|
194 |
+
<div>
|
195 |
+
|
196 |
+
|
197 |
+
[](https://zeabur.com?referralCode=ErlichLiu&utm_source=ErlichLiu)
|
198 |
+
|
199 |
+
1. 首先 fork 一份代码。
|
200 |
+
2. 进入 [Zeabur](https://zeabur.com?referralCode=ErlichLiu&utm_source=ErlichLiu),登录。
|
201 |
+
3. 选择 Create New Project,选择地区为新加坡或日本区域。
|
202 |
+
4. 选择项目来源为 Github,搜索框搜索 DeepClaude 后确认,然后点击右下角的 Config。
|
203 |
+
5. 在 Environment Variables 区域点击 Add Environment Variables,逐个填写 .env.example 当中的配置,等号左右对应的就是 Environment Variables 里的 Key 和 Value。(注意:ALLOW_API_KEY 是你自己规定的外部访问你的服务时需要填写的 API KEY,可以随意填写,不要有空格)
|
204 |
+
6. 全部编辑完成后点击 Next,然后点击 Deploy,静待片刻即可完成部署。
|
205 |
+
7. 完成部署后点击当前面板上部的 Networking,点击 Public 区域的 Generate Domain(也可以配置自己的域名),然后输入一个你想要的域名即可(这个完整的 xxx.zeabur.app 将是你接下来在任何开源对话框、Cursor、Roo Code 等产品内填写的 baseUrl)
|
206 |
+
8. 接下来就可以去上述所说的任何的项目里去配置使用你的 API 了,也可以配置到 One API,作为一个 OpenAI 渠道使用。(晚点会补充这部分的配置方法)
|
207 |
+
</div>
|
208 |
+
</details>
|
209 |
+
|
210 |
+
## 使用 docker-compose 部署(Docker 镜像将随着 main 分支自动更新到最新)
|
211 |
+
|
212 |
+
推荐可以使用 `docker-compose.yml` 文件进行部署,更加方便快捷。
|
213 |
+
|
214 |
+
1. 确保已安装 Docker Compose。
|
215 |
+
2. 复制 `docker-compose.yml` 文件到项目根目录。
|
216 |
+
3. 修改 `docker-compose.yml` 文件中的环境变量配置,将 `your_allow_api_key`,`your_allow_origins`,`your_deepseek_api_key` 和 `your_claude_api_key` 替换为你的实际配置。
|
217 |
+
4. 在项目根目录下运行 Docker Compose 命令启动服务:
|
218 |
+
|
219 |
+
```bash
|
220 |
+
docker-compose up -d
|
221 |
+
```
|
222 |
+
|
223 |
+
服务启动后,DeepClaude API 将在 `http://宿主机IP:8000/v1/chat/completions` 上进行访问。
|
224 |
+
|
225 |
+
|
226 |
+
## Docker 部署(自行 Build)
|
227 |
+
|
228 |
+
1. **构建 Docker 镜像**
|
229 |
+
|
230 |
+
在项目根目录下,使用 Dockerfile 构建镜像。请确保已经安装 Docker 环境。
|
231 |
+
|
232 |
+
```bash
|
233 |
+
docker build -t deepclaude:latest .
|
234 |
+
```
|
235 |
+
|
236 |
+
2. **运行 Docker 容器**
|
237 |
+
|
238 |
+
运行构建好的 Docker 镜像,将容器的 8000 端口映射到宿主机的 8000 端口。同时,通过 `-e` 参数设置必要的环境变量,包括 API 密钥、允许的域名等。请根据 `.env.example` 文件中的说明配置环境变量。
|
239 |
+
|
240 |
+
```bash
|
241 |
+
docker run -d \
|
242 |
+
-p 8000:8000 \
|
243 |
+
-e ALLOW_API_KEY=your_allow_api_key \
|
244 |
+
-e ALLOW_ORIGINS="*" \
|
245 |
+
-e DEEPSEEK_API_KEY=your_deepseek_api_key \
|
246 |
+
-e DEEPSEEK_API_URL=https://api.deepseek.com/v1/chat/completions \
|
247 |
+
-e DEEPSEEK_MODEL=deepseek-reasoner \
|
248 |
+
-e IS_ORIGIN_REASONING=true \
|
249 |
+
-e CLAUDE_API_KEY=your_claude_api_key \
|
250 |
+
-e CLAUDE_MODEL=claude-3-5-sonnet-20241022 \
|
251 |
+
-e CLAUDE_PROVIDER=anthropic \
|
252 |
+
-e CLAUDE_API_URL=https://api.anthropic.com/v1/messages \
|
253 |
+
-e LOG_LEVEL=INFO \
|
254 |
+
--restart always \
|
255 |
+
deepclaude:latest
|
256 |
+
```
|
257 |
+
|
258 |
+
请替换上述命令中的 `your_allow_api_key`,`your_allow_origins`,`your_deepseek_api_key` 和 `your_claude_api_key` 为你实际的 API 密钥和配置。`ALLOW_ORIGINS` 请设置为允许访问的域名,如 `"http://localhost:3000,https://chat.example.com"` 或 `"*"` 表示允许所有来源。
|
259 |
+
|
260 |
+
|
261 |
+
# Automatic fork sync
|
262 |
+
项目已经支持 Github Actions 自动更新 fork 项目的代码,保持你的 fork 版本与当前 main 分支保持一致。如需开启,请 fork 后在 Settings 中开启 Actions 权限即可。
|
263 |
+
|
264 |
+
|
265 |
+
# Technology Stack
|
266 |
+
- [FastAPI](https://fastapi.tiangolo.com/)
|
267 |
+
- [UV as package manager](https://docs.astral.sh/uv/#project-management)
|
268 |
+
- [Docker](https://www.docker.com/)
|
269 |
+
|
270 |
+
# Star History
|
271 |
+
|
272 |
+
[](https://star-history.com/#ErlichLiu/DeepClaude&Date)
|
273 |
+
|
274 |
+
# Buy me a coffee
|
275 |
+
<img src="https://img.erlich.fun/personal-blog/uPic/IMG_3625.JPG" alt="微信赞赏码" style="width: 400px;"/>
|
276 |
+
|
277 |
+
# About Me
|
278 |
+
- Email: [email protected]
|
279 |
+
- Website: [Erlichliu](https://erlich.fun)
|
app/clients/__init__.py
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from .base_client import BaseClient
|
2 |
+
from .deepseek_client import DeepSeekClient
|
3 |
+
from .claude_client import ClaudeClient
|
4 |
+
|
5 |
+
__all__ = ['BaseClient', 'DeepSeekClient', 'ClaudeClient']
|
app/clients/base_client.py
ADDED
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""基础客户端类,定义通用接口"""
|
2 |
+
from typing import AsyncGenerator, Any
|
3 |
+
import aiohttp
|
4 |
+
from app.utils.logger import logger
|
5 |
+
from abc import ABC, abstractmethod
|
6 |
+
|
7 |
+
|
8 |
+
class BaseClient(ABC):
    """Abstract base for streaming chat API clients.

    Stores the credentials/endpoint pair and provides one shared helper for
    issuing a streaming POST request. Concrete clients implement
    ``stream_chat``.
    """

    def __init__(self, api_key: str, api_url: str):
        """Remember the API key and endpoint used by this client.

        Args:
            api_key: secret key sent with every request
            api_url: full URL of the upstream chat-completion endpoint
        """
        self.api_key = api_key
        self.api_url = api_url

    async def _make_request(self, headers: dict, data: dict) -> AsyncGenerator[bytes, None]:
        """POST ``data`` to ``self.api_url`` and yield the raw response body in chunks.

        Best-effort semantics: a non-200 status or any exception is logged and
        the generator simply ends, so callers observe an empty stream rather
        than an error.

        Args:
            headers: HTTP headers for the request
            data: JSON-serializable request payload

        Yields:
            bytes: raw response chunks, as they arrive from the server
        """
        try:
            async with aiohttp.ClientSession() as http_session:
                async with http_session.post(self.api_url, headers=headers, json=data) as resp:
                    if resp.status != 200:
                        error_text = await resp.text()
                        logger.error(f"API 请求失败: {error_text}")
                        return

                    async for piece in resp.content.iter_any():
                        yield piece

        except Exception as e:
            # Deliberately swallowed: an upstream failure terminates the
            # stream instead of propagating into the response handler.
            logger.error(f"请求 API 时发生错误: {e}")

    @abstractmethod
    async def stream_chat(self, messages: list, model: str) -> AsyncGenerator[tuple[str, str], None]:
        """Stream one chat exchange; implemented by subclasses.

        Args:
            messages: chat messages (OpenAI-style dicts)
            model: model identifier

        Yields:
            tuple[str, str]: (content type, content text)
        """
        pass
|
app/clients/claude_client.py
ADDED
@@ -0,0 +1,142 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Claude API 客户端"""
|
2 |
+
import json
|
3 |
+
from typing import AsyncGenerator
|
4 |
+
from app.utils.logger import logger
|
5 |
+
from .base_client import BaseClient
|
6 |
+
|
7 |
+
|
8 |
+
class ClaudeClient(BaseClient):
    """Claude chat client supporting several upstream providers.

    Providers:
        * ``anthropic``  -- official Anthropic Messages API
        * ``openrouter`` -- OpenAI-compatible relay; the model name is
          rewritten to OpenRouter's ``anthropic/claude-3.5-sonnet``
        * ``oneapi``     -- OpenAI-compatible relay; model name used as-is
    """

    def __init__(self, api_key: str, api_url: str = "https://api.anthropic.com/v1/messages", provider: str = "anthropic"):
        """Initialize the Claude client.

        Args:
            api_key: Claude API key
            api_url: Claude API endpoint
            provider: upstream provider, one of "anthropic" / "openrouter" / "oneapi"
        """
        super().__init__(api_key, api_url)
        self.provider = provider

    @staticmethod
    def _clamp_temperature(temperature: float) -> float:
        """Claude only accepts temperature within [0, 1]; anything else falls back to 1."""
        return 1 if temperature < 0 or temperature > 1 else temperature

    @staticmethod
    def _first(items: list) -> dict:
        """Return the first element of ``items``, or {} when the list is empty.

        Guards against responses where ``choices``/``content`` is present but
        empty — the previous ``.get(..., [{}])[0]`` pattern raised IndexError
        in that case.
        """
        return items[0] if items else {}

    def _build_request(self, messages: list, model_arg: tuple[float, float, float, float], model: str, stream: bool) -> tuple[dict, dict]:
        """Build the (headers, payload) pair for the configured provider.

        Raises:
            ValueError: if ``self.provider`` is not supported.
        """
        if self.provider in ("openrouter", "oneapi"):
            headers = {
                "Authorization": f"Bearer {self.api_key}",
                "Content-Type": "application/json",
            }
            if self.provider == "openrouter":
                # Both headers are required by OpenRouter.
                headers["HTTP-Referer"] = "https://github.com/ErlichLiu/DeepClaude"
                headers["X-Title"] = "DeepClaude"
                model = "anthropic/claude-3.5-sonnet"  # OpenRouter model naming
            payload = {
                "model": model,
                "messages": messages,
                "stream": stream,
                "temperature": self._clamp_temperature(model_arg[0]),
                "top_p": model_arg[1],
                "presence_penalty": model_arg[2],
                "frequency_penalty": model_arg[3],
            }
        elif self.provider == "anthropic":
            headers = {
                "x-api-key": self.api_key,
                "anthropic-version": "2023-06-01",
                "content-type": "application/json",
                "accept": "text/event-stream" if stream else "application/json",
            }
            payload = {
                "model": model,
                "messages": messages,
                "max_tokens": 8192,
                "stream": stream,
                # Claude only supports temperature and top_p.
                "temperature": self._clamp_temperature(model_arg[0]),
                "top_p": model_arg[1],
            }
        else:
            raise ValueError(f"不支持的Claude Provider: {self.provider}")
        return headers, payload

    def _extract_delta_content(self, parsed: dict) -> str:
        """Pull the incremental answer text out of one parsed SSE event."""
        if self.provider in ("openrouter", "oneapi"):
            # OpenAI-style streaming delta.
            return self._first(parsed.get('choices', [])).get('delta', {}).get('content', '')
        # Anthropic event stream: only content_block_delta events carry text.
        if parsed.get('type') == 'content_block_delta':
            return parsed.get('delta', {}).get('text', '')
        return ''

    def _extract_full_content(self, parsed: dict) -> str:
        """Pull the complete answer text out of a non-streaming response body."""
        if self.provider in ("openrouter", "oneapi"):
            return self._first(parsed.get('choices', [])).get('message', {}).get('content', '')
        return self._first(parsed.get('content', [])).get('text', '')

    async def stream_chat(
        self,
        messages: list,
        model_arg: tuple[float, float, float, float],
        model: str,
        stream: bool = True
    ) -> AsyncGenerator[tuple[str, str], None]:
        """Streaming or non-streaming chat against the Claude API.

        Args:
            messages: chat messages
            model_arg: (temperature, top_p, presence_penalty, frequency_penalty)
            model: model name; rewritten to 'anthropic/claude-3.5-sonnet' for OpenRouter
            stream: whether to stream the response (default True)

        Yields:
            tuple[str, str]: ("answer", text chunk)

        Raises:
            ValueError: if the configured provider is not supported.
        """
        headers, payload = self._build_request(messages, model_arg, model, stream)

        logger.debug(f"开始对话:{payload}")

        if stream:
            async for chunk in self._make_request(headers, payload):
                chunk_str = chunk.decode('utf-8')
                if not chunk_str.strip():
                    continue

                for line in chunk_str.split('\n'):
                    if not line.startswith('data: '):
                        continue
                    json_str = line[6:]  # strip the 'data: ' prefix
                    if json_str.strip() == '[DONE]':
                        return

                    try:
                        # Named 'parsed' (not 'data') so the request payload
                        # is no longer shadowed inside the loop.
                        parsed = json.loads(json_str)
                    except json.JSONDecodeError:
                        # Partial SSE line split across chunks; skip it.
                        continue
                    content = self._extract_delta_content(parsed)
                    if content:
                        yield "answer", content
        else:
            # NOTE(review): assumes the whole JSON body arrives in one chunk —
            # behavior carried over from the original implementation.
            async for chunk in self._make_request(headers, payload):
                try:
                    parsed = json.loads(chunk.decode('utf-8'))
                except json.JSONDecodeError:
                    continue
                content = self._extract_full_content(parsed)
                if content:
                    yield "answer", content
|
app/clients/deepseek_client.py
ADDED
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""DeepSeek API 客户端"""
|
2 |
+
import json
|
3 |
+
from typing import AsyncGenerator
|
4 |
+
from app.utils.logger import logger
|
5 |
+
from .base_client import BaseClient
|
6 |
+
|
7 |
+
|
8 |
+
class DeepSeekClient(BaseClient):
    """DeepSeek API client that streams chat completions over SSE.

    Supports two reasoning formats:
    - native ``reasoning_content`` deltas (official deepseek-reasoner), and
    - ``<think>...</think>`` tags embedded in regular ``content`` deltas
      (distilled / third-party hosted R1 models).
    """

    def __init__(self, api_key: str, api_url: str = "https://api.siliconflow.cn/v1/chat/completions", provider: str = "deepseek"):
        """Initialize the DeepSeek client.

        Args:
            api_key: DeepSeek API key.
            api_url: DeepSeek chat-completions endpoint URL.
            provider: Upstream provider label (used for logging/diagnostics).
        """
        super().__init__(api_key, api_url)
        self.provider = provider

    def _process_think_tag_content(self, content: str) -> tuple[bool, str]:
        """Check whether *content* contains a complete <think>...</think> pair.

        Args:
            content: Accumulated text to inspect.

        Returns:
            tuple[bool, str]:
                bool: True when a complete tag pair is detected.
                str: The content, unchanged.
        """
        has_start = "<think>" in content
        has_end = "</think>" in content

        if has_start and has_end:
            return True, content
        elif has_start:
            return False, content
        elif not has_start and not has_end:
            return False, content
        else:
            # Only the closing tag is present (the opening tag arrived in an
            # earlier chunk): treat the pair as complete.
            return True, content

    async def stream_chat(self, messages: list, model: str = "deepseek-ai/DeepSeek-R1", is_origin_reasoning: bool = True) -> AsyncGenerator[tuple[str, str], None]:
        """Stream a chat completion.

        Args:
            messages: OpenAI-format message list.
            model: Model name.
            is_origin_reasoning: True when the upstream emits native
                ``reasoning_content`` deltas; False when reasoning arrives
                wrapped in <think> tags inside ``content``.

        Yields:
            tuple[str, str]: (content type, text), where content type is
            "reasoning" or "content".
        """
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
            "Accept": "text/event-stream",
        }
        request_data = {
            "model": model,
            "messages": messages,
            "stream": True,
        }

        logger.debug(f"开始流式对话:{request_data}")

        is_collecting_think = False

        async for chunk in self._make_request(headers, request_data):
            # NOTE(review): assumes each chunk contains whole UTF-8 sequences
            # and whole SSE lines; a chunk boundary splitting either would
            # break decoding/parsing — confirm against BaseClient._make_request.
            chunk_str = chunk.decode('utf-8')

            try:
                for line in chunk_str.splitlines():
                    if not line.startswith("data: "):
                        continue
                    json_str = line[len("data: "):]
                    if json_str == "[DONE]":
                        return

                    # Bug fix: previously rebound ``data`` (the request
                    # payload); use a distinct name for the parsed SSE event.
                    event = json.loads(json_str)
                    if not (event and event.get("choices") and event["choices"][0].get("delta")):
                        continue
                    delta = event["choices"][0]["delta"]

                    if is_origin_reasoning:
                        # Native reasoning stream: reasoning_content deltas
                        # first, then plain content once reasoning finishes.
                        if delta.get("reasoning_content"):
                            content = delta["reasoning_content"]
                            logger.debug(f"提取推理内容:{content}")
                            yield "reasoning", content

                        if delta.get("reasoning_content") is None and delta.get("content"):
                            content = delta["content"]
                            logger.info(f"提取内容信息,推理阶段结束: {content}")
                            yield "content", content
                    else:
                        # <think>-tag models: classify each delta by tag state.
                        if delta.get("content"):
                            content = delta["content"]
                            if content == "":  # skip only fully empty strings
                                continue
                            logger.debug(f"非原生推理内容:{content}")

                            if "<think>" in content and not is_collecting_think:
                                # Reasoning section starts.
                                logger.debug(f"开始收集推理内容:{content}")
                                is_collecting_think = True
                                yield "reasoning", content
                            elif is_collecting_think:
                                if "</think>" in content:
                                    # Reasoning section ends.
                                    # Bug fix: log message was mojibake
                                    # ("推理内��结束"); restored the intended text.
                                    logger.debug(f"推理内容结束:{content}")
                                    is_collecting_think = False
                                    yield "reasoning", content
                                    # Emit an empty content event to trigger
                                    # the Claude stage downstream.
                                    yield "content", ""
                                else:
                                    # Still inside the reasoning section.
                                    yield "reasoning", content
                            else:
                                # Ordinary answer content.
                                yield "content", content

            except json.JSONDecodeError as e:
                logger.error(f"JSON 解析错误: {e}")
            except Exception as e:
                logger.error(f"处理 chunk 时发生错误: {e}")
|
app/deepclaude/__init__.py
ADDED
File without changes
|
app/deepclaude/deepclaude.py
ADDED
@@ -0,0 +1,268 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""DeepClaude 服务,用于协调 DeepSeek 和 Claude API 的调用"""
|
2 |
+
import json
|
3 |
+
import time
|
4 |
+
import tiktoken
|
5 |
+
import asyncio
|
6 |
+
from typing import AsyncGenerator
|
7 |
+
from app.utils.logger import logger
|
8 |
+
from app.clients import DeepSeekClient, ClaudeClient
|
9 |
+
|
10 |
+
|
11 |
+
class DeepClaude:
    """Bridges DeepSeek's streamed reasoning into Claude's answer generation."""

    def __init__(self, deepseek_api_key: str, claude_api_key: str,
                 deepseek_api_url: str = "https://api.deepseek.com/v1/chat/completions",
                 claude_api_url: str = "https://api.anthropic.com/v1/messages",
                 claude_provider: str = "anthropic",
                 is_origin_reasoning: bool = True):
        """Initialize both upstream API clients.

        Args:
            deepseek_api_key: DeepSeek API key.
            claude_api_key: Claude API key.
            deepseek_api_url: DeepSeek chat-completions endpoint.
            claude_api_url: Claude messages endpoint.
            claude_provider: Claude provider label.
            is_origin_reasoning: Whether DeepSeek emits native reasoning deltas.
        """
        self.deepseek_client = DeepSeekClient(deepseek_api_key, deepseek_api_url)
        self.claude_client = ClaudeClient(claude_api_key, claude_api_url, claude_provider)
        self.is_origin_reasoning = is_origin_reasoning

    @staticmethod
    def _build_claude_messages(messages: list, reasoning: str) -> list:
        """Build Claude's input messages from the originals plus the reasoning.

        Bug fix: the previous implementation mutated the caller's last
        message dict in place (``list.copy()`` is shallow); this builds a
        replacement dict instead and never touches the caller's objects.

        Args:
            messages: Original OpenAI-format message list.
            reasoning: Full reasoning text collected from DeepSeek.

        Returns:
            list: New message list with the reasoning appended to the last
            user message and any role=system entries removed.
        """
        claude_messages = list(messages)
        combined_content = f"""
Here's my another model's reasoning process:\n{reasoning}\n\n
Based on this reasoning, provide your response directly to me:"""

        # Append the reasoning only to a trailing user message, if present.
        if claude_messages and claude_messages[-1].get("role", "") == "user":
            last_message = claude_messages[-1]
            original_content = last_message["content"]
            fixed_content = f"Here's my original input:\n{original_content}\n\n{combined_content}"
            claude_messages[-1] = {**last_message, "content": fixed_content}

        # Drop role=system entries; Claude's messages API does not accept them here.
        return [message for message in claude_messages if message.get("role", "") != "system"]

    async def chat_completions_with_stream(
        self,
        messages: list,
        model_arg: tuple[float, float, float, float],
        deepseek_model: str = "deepseek-reasoner",
        claude_model: str = "claude-3-5-sonnet-20241022"
    ) -> AsyncGenerator[bytes, None]:
        """Run the full streaming pipeline: DeepSeek reasoning, then Claude answer.

        Args:
            messages: Initial message list.
            model_arg: (temperature, top_p, presence_penalty, frequency_penalty).
            deepseek_model: DeepSeek model name.
            claude_model: Claude model name.

        Yields:
            SSE byte frames shaped like OpenAI chat.completion.chunk objects:
            {
                "id": "chatcmpl-xxx",
                "object": "chat.completion.chunk",
                "created": timestamp,
                "model": model_name,
                "choices": [{
                    "index": 0,
                    "delta": {
                        "role": "assistant",
                        "reasoning_content": reasoning_content,
                        "content": content
                    }
                }]
            }
        """
        # Unique session id and timestamp shared by every chunk.
        chat_id = f"chatcmpl-{hex(int(time.time() * 1000))[2:]}"
        created_time = int(time.time())

        # Queue collecting SSE frames from both producer tasks.
        output_queue = asyncio.Queue()
        # Queue handing the finished DeepSeek reasoning to the Claude task.
        claude_queue = asyncio.Queue()

        # Accumulates DeepSeek's reasoning deltas.
        reasoning_content = []

        async def process_deepseek():
            # Produce reasoning chunks; hand the full reasoning to Claude when done.
            logger.info(f"开始处理 DeepSeek 流,使用模型:{deepseek_model}, 提供商: {self.deepseek_client.provider}")
            try:
                async for content_type, content in self.deepseek_client.stream_chat(messages, deepseek_model, self.is_origin_reasoning):
                    if content_type == "reasoning":
                        reasoning_content.append(content)
                        response = {
                            "id": chat_id,
                            "object": "chat.completion.chunk",
                            "created": created_time,
                            "model": deepseek_model,
                            "choices": [{
                                "index": 0,
                                "delta": {
                                    "role": "assistant",
                                    "reasoning_content": content,
                                    "content": ""
                                }
                            }]
                        }
                        await output_queue.put(f"data: {json.dumps(response)}\n\n".encode('utf-8'))
                    elif content_type == "content":
                        # First "content" event means reasoning is complete:
                        # forward the reasoning to Claude and stop consuming.
                        logger.info(f"DeepSeek 推理完成,收集到的推理内容长度:{len(''.join(reasoning_content))}")
                        await claude_queue.put("".join(reasoning_content))
                        break
            except Exception as e:
                logger.error(f"处理 DeepSeek 流时发生错误: {e}")
                # Unblock the Claude task even on failure.
                await claude_queue.put("")
            # None marks end of the DeepSeek task.
            logger.info("DeepSeek 任务处理完成,标记结束")
            await output_queue.put(None)

        async def process_claude():
            try:
                logger.info("等待获取 DeepSeek 的推理内容...")
                reasoning = await claude_queue.get()
                logger.debug(f"获取到推理内容,内容长度:{len(reasoning) if reasoning else 0}")
                if not reasoning:
                    logger.warning("未能获取到有效的推理内容,将使用默认提示继续")
                    reasoning = "获取推理内容失败"

                claude_messages = self._build_claude_messages(messages, reasoning)

                logger.info(f"开始处理 Claude 流,使用模型: {claude_model}, 提供商: {self.claude_client.provider}")

                async for content_type, content in self.claude_client.stream_chat(
                    messages=claude_messages,
                    model_arg=model_arg,
                    model=claude_model,
                ):
                    if content_type == "answer":
                        response = {
                            "id": chat_id,
                            "object": "chat.completion.chunk",
                            "created": created_time,
                            "model": claude_model,
                            "choices": [{
                                "index": 0,
                                "delta": {
                                    "role": "assistant",
                                    "content": content
                                }
                            }]
                        }
                        await output_queue.put(f"data: {json.dumps(response)}\n\n".encode('utf-8'))
            except Exception as e:
                logger.error(f"处理 Claude 流时发生错误: {e}")
            # None marks end of the Claude task.
            logger.info("Claude 任务处理完成,标记结束")
            await output_queue.put(None)

        # Run both stages concurrently; Claude blocks on claude_queue until
        # DeepSeek finishes reasoning.
        deepseek_task = asyncio.create_task(process_deepseek())
        claude_task = asyncio.create_task(process_claude())

        # Drain frames until both producers have posted their None sentinel.
        finished_tasks = 0
        while finished_tasks < 2:
            item = await output_queue.get()
            if item is None:
                finished_tasks += 1
            else:
                yield item

        # Terminate the SSE stream.
        yield b'data: [DONE]\n\n'

    async def chat_completions_without_stream(
        self,
        messages: list,
        model_arg: tuple[float, float, float, float],
        deepseek_model: str = "deepseek-reasoner",
        claude_model: str = "claude-3-5-sonnet-20241022"
    ) -> dict:
        """Run the pipeline and return one aggregated OpenAI-style response.

        Args:
            messages: Initial message list.
            model_arg: (temperature, top_p, presence_penalty, frequency_penalty).
            deepseek_model: DeepSeek model name.
            claude_model: Claude model name.

        Returns:
            dict: Full chat.completion response, including DeepSeek's
            reasoning as ``reasoning_content`` and approximate token usage.
        """
        chat_id = f"chatcmpl-{hex(int(time.time() * 1000))[2:]}"
        created_time = int(time.time())
        reasoning_content = []

        # 1. Collect DeepSeek's reasoning (still consumed as a stream).
        try:
            async for content_type, content in self.deepseek_client.stream_chat(messages, deepseek_model, self.is_origin_reasoning):
                if content_type == "reasoning":
                    reasoning_content.append(content)
                elif content_type == "content":
                    break
        except Exception as e:
            logger.error(f"获取 DeepSeek 推理内容时发生错误: {e}")
            reasoning_content = ["获取推理内容失败"]

        # 2. Build Claude's input messages (does not mutate the caller's list).
        reasoning = "".join(reasoning_content)
        claude_messages = self._build_claude_messages(messages, reasoning)

        # 3. Approximate token usage with a gpt-4o tokenizer (Claude's own
        # tokenizer is not exposed; counts are therefore estimates).
        token_content = "\n".join([message.get("content", "") for message in claude_messages])
        encoding = tiktoken.encoding_for_model("gpt-4o")
        input_tokens = encoding.encode(token_content)
        logger.debug(f"输入 Tokens: {len(input_tokens)}")

        logger.debug("claude messages: " + str(claude_messages))

        # 4. Get Claude's (non-streamed) answer and assemble the response.
        try:
            answer = ""
            async for content_type, content in self.claude_client.stream_chat(
                messages=claude_messages,
                model_arg=model_arg,
                model=claude_model,
                stream=False
            ):
                if content_type == "answer":
                    answer += content
            output_tokens = encoding.encode(answer)
            logger.debug(f"输出 Tokens: {len(output_tokens)}")

            return {
                "id": chat_id,
                "object": "chat.completion",
                "created": created_time,
                "model": claude_model,
                "choices": [{
                    "index": 0,
                    "message": {
                        "role": "assistant",
                        "content": answer,
                        "reasoning_content": reasoning
                    },
                    "finish_reason": "stop"
                }],
                "usage": {
                    "prompt_tokens": len(input_tokens),
                    "completion_tokens": len(output_tokens),
                    "total_tokens": len(input_tokens) + len(output_tokens)
                }
            }
        except Exception as e:
            logger.error(f"获取 Claude 响应时发生错误: {e}")
            raise e
|
app/main.py
ADDED
@@ -0,0 +1,160 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
import sys
from dotenv import load_dotenv

# Load environment variables before importing modules that read them at import time.
load_dotenv()

from fastapi import FastAPI, Depends, Request
from fastapi.responses import StreamingResponse
from fastapi.middleware.cors import CORSMiddleware
from app.utils.logger import logger
from app.utils.auth import verify_api_key
from app.deepclaude.deepclaude import DeepClaude

app = FastAPI(title="DeepClaude API")

# CORS configuration, API keys, endpoints and model names from the environment.
ALLOW_ORIGINS = os.getenv("ALLOW_ORIGINS", "*")

CLAUDE_API_KEY = os.getenv("CLAUDE_API_KEY")
CLAUDE_MODEL = os.getenv("CLAUDE_MODEL")
CLAUDE_PROVIDER = os.getenv("CLAUDE_PROVIDER", "anthropic")  # Claude provider, defaults to anthropic
CLAUDE_API_URL = os.getenv("CLAUDE_API_URL", "https://api.anthropic.com/v1/messages")

DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY")
DEEPSEEK_API_URL = os.getenv("DEEPSEEK_API_URL")
DEEPSEEK_MODEL = os.getenv("DEEPSEEK_MODEL")

# Whether the DeepSeek upstream emits native reasoning_content deltas.
IS_ORIGIN_REASONING = os.getenv("IS_ORIGIN_REASONING", "True").lower() == "true"

# CORS: split the comma-separated origin list (entries must not contain spaces).
allow_origins_list = ALLOW_ORIGINS.split(",") if ALLOW_ORIGINS else []

app.add_middleware(
    CORSMiddleware,
    allow_origins=allow_origins_list,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Create the global DeepClaude instance; both upstream API keys are mandatory.
if not DEEPSEEK_API_KEY or not CLAUDE_API_KEY:
    logger.critical("请设置环境变量 CLAUDE_API_KEY 和 DEEPSEEK_API_KEY")
    sys.exit(1)

deep_claude = DeepClaude(
    DEEPSEEK_API_KEY,
    CLAUDE_API_KEY,
    DEEPSEEK_API_URL,
    CLAUDE_API_URL,
    CLAUDE_PROVIDER,
    IS_ORIGIN_REASONING
)

# Emit one line per level so the effective LOG_LEVEL is visible at startup.
logger.debug("当前日志级别为 DEBUG")
logger.info("开始请求")
|
59 |
+
|
60 |
+
@app.get("/", dependencies=[Depends(verify_api_key)])
async def root():
    """Landing endpoint; requires a valid API key and returns a welcome message."""
    logger.info("访问了根路径")
    welcome = {"message": "Welcome to DeepClaude API"}
    return welcome
|
64 |
+
|
65 |
+
@app.get("/v1/models")
async def list_models():
    """Return the available model list, following the OpenAI API schema."""
    permission_entry = {
        "id": "modelperm-deepclaude",
        "object": "model_permission",
        "created": 1677610602,
        "allow_create_engine": False,
        "allow_sampling": True,
        "allow_logprobs": True,
        "allow_search_indices": False,
        "allow_view": True,
        "allow_fine_tuning": False,
        "organization": "*",
        "group": None,
        "is_blocking": False,
    }
    deepclaude_model = {
        "id": "deepclaude",
        "object": "model",
        "created": 1677610602,
        "owned_by": "deepclaude",
        "permission": [permission_entry],
        "root": "deepclaude",
        "parent": None,
    }
    return {"object": "list", "data": [deepclaude_model]}
|
95 |
+
|
96 |
+
@app.post("/v1/chat/completions", dependencies=[Depends(verify_api_key)])
async def chat_completions(request: Request):
    """Handle a chat-completion request with streaming or non-streaming output.

    The request body follows the OpenAI API and may contain:
    - messages: message list
    - model: model name (optional)
    - stream: whether to stream the output (optional, defaults to True)
    - temperature: randomness (optional)
    - top_p: top_p (optional)
    - presence_penalty: topic freshness (optional)
    - frequency_penalty: frequency penalty (optional)
    """

    try:
        # 1. Extract the base payload.
        body = await request.json()
        messages = body.get("messages")

        # 2. Fetch and validate sampling parameters; raises ValueError on bad input.
        model_arg = (
            get_and_validate_params(body)
        )
        stream = model_arg[4]  # last element of the tuple is the stream flag

        # 3. Dispatch on the stream flag.
        if stream:
            return StreamingResponse(
                deep_claude.chat_completions_with_stream(
                    messages=messages,
                    model_arg=model_arg[:4],  # drop the stream flag
                    deepseek_model=DEEPSEEK_MODEL,
                    claude_model=CLAUDE_MODEL
                ),
                media_type="text/event-stream"
            )
        else:
            # Non-streaming: await the aggregated OpenAI-style response.
            response = await deep_claude.chat_completions_without_stream(
                messages=messages,
                model_arg=model_arg[:4],  # drop the stream flag
                deepseek_model=DEEPSEEK_MODEL,
                claude_model=CLAUDE_MODEL
            )
            return response

    except Exception as e:
        # NOTE(review): errors are returned as a 200 response with an
        # {"error": ...} body; consider raising HTTPException for proper
        # status codes — confirm clients before changing.
        logger.error(f"处理请求时发生错误: {e}")
        return {"error": str(e)}
|
145 |
+
|
146 |
+
|
147 |
+
def get_and_validate_params(body):
    """Extract and validate sampling parameters from the request body.

    Args:
        body: Parsed JSON request body (dict).

    Returns:
        tuple: (temperature, top_p, presence_penalty, frequency_penalty, stream)

    Raises:
        ValueError: If a Sonnet model is requested with a temperature
            outside the [0, 1] range (or of a non-numeric type).
    """
    # TODO: allow the defaults to be customized
    temperature: float = body.get("temperature", 0.5)
    top_p: float = body.get("top_p", 0.9)
    presence_penalty: float = body.get("presence_penalty", 0.0)
    frequency_penalty: float = body.get("frequency_penalty", 0.0)
    stream: bool = body.get("stream", True)

    if "sonnet" in body.get("model", ""):  # Sonnet requires temperature in [0, 1]
        # Bug fix: accept ints too (JSON clients commonly send 0 or 1, which
        # arrive as int, not float); bool is excluded since it subclasses int.
        if (isinstance(temperature, bool)
                or not isinstance(temperature, (int, float))
                or temperature < 0.0 or temperature > 1.0):
            raise ValueError("Sonnet 设定 temperature 必须在 0 到 1 之间")

    return (temperature, top_p, presence_penalty, frequency_penalty, stream)
|
app/utils/auth.py
ADDED
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from fastapi import HTTPException, Header
from typing import Optional
import os
from dotenv import load_dotenv
from app.utils.logger import logger

# Load the .env file at import time so ALLOW_API_KEY is available below.
logger.info(f"当前工作目录: {os.getcwd()}")
logger.info("尝试加载.env文件...")
load_dotenv(override=True)  # override=True forces overwriting existing env vars

# The single API key clients must present; the service refuses to start without it.
ALLOW_API_KEY = os.getenv("ALLOW_API_KEY")
logger.info(f"ALLOW_API_KEY环境变量状态: {'已设置' if ALLOW_API_KEY else '未设置'}")

if not ALLOW_API_KEY:
    raise ValueError("ALLOW_API_KEY environment variable is not set")

# Log only the first 4 characters of the key for debugging.
logger.info(f"Loaded API key starting with: {ALLOW_API_KEY[:4] if len(ALLOW_API_KEY) >= 4 else ALLOW_API_KEY}")
|
21 |
+
|
22 |
+
|
23 |
+
async def verify_api_key(authorization: Optional[str] = Header(None)) -> None:
    """Validate the client API key from the Authorization header.

    Args:
        authorization (Optional[str], optional): Raw Authorization header
            value ("Bearer <key>"). Defaults to Header(None).

    Raises:
        HTTPException: 401 when the header is missing or the key is invalid.
    """
    if authorization is None:
        logger.warning("请求缺少Authorization header")
        raise HTTPException(
            status_code=401,
            detail="Missing Authorization header"
        )

    # Bug fix: strip only a leading "Bearer " prefix; str.replace removed the
    # substring anywhere inside the key, corrupting keys containing "Bearer ".
    api_key = authorization.removeprefix("Bearer ").strip()
    if api_key != ALLOW_API_KEY:
        # Security fix: do not write the submitted secret to the logs.
        logger.warning("无效的API密钥")
        raise HTTPException(
            status_code=401,
            detail="Invalid API key"
        )

    logger.info("API密钥验证通过")
|
app/utils/logger.py
ADDED
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import logging
|
2 |
+
import colorlog
|
3 |
+
import sys
|
4 |
+
import os
|
5 |
+
from dotenv import load_dotenv
|
6 |
+
|
7 |
+
# 确保环境变量被加载
|
8 |
+
load_dotenv()
|
9 |
+
|
10 |
+
def get_log_level() -> int:
    """Resolve the log level from the LOG_LEVEL environment variable.

    Returns:
        int: A ``logging`` module level constant; INFO when the variable is
        unset or holds an unrecognized name.
    """
    name_to_level = {
        'DEBUG': logging.DEBUG,
        'INFO': logging.INFO,
        'WARNING': logging.WARNING,
        'ERROR': logging.ERROR,
        'CRITICAL': logging.CRITICAL,
    }

    requested = os.getenv('LOG_LEVEL', 'INFO')
    return name_to_level.get(requested.upper(), logging.INFO)
|
26 |
+
|
27 |
+
def setup_logger(name: str = "DeepClaude") -> logging.Logger:
    """Build (or return the already-configured) colorized logger.

    Args:
        name (str, optional): Logger name. Defaults to "DeepClaude".

    Returns:
        logging.Logger: Logger with a single colorized stdout handler.
    """
    configured = colorlog.getLogger(name)

    # Idempotent: a logger that already has handlers is returned untouched.
    if configured.handlers:
        return configured

    # Level comes from the LOG_LEVEL environment variable.
    level = get_log_level()
    configured.setLevel(level)

    # Single stdout handler with per-level colors.
    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(level)
    handler.setFormatter(colorlog.ColoredFormatter(
        "%(log_color)s%(asctime)s - %(name)s - %(levelname)s - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
        log_colors={
            'DEBUG': 'cyan',
            'INFO': 'green',
            'WARNING': 'yellow',
            'ERROR': 'red',
            'CRITICAL': 'red,bg_white',
        }
    ))

    configured.addHandler(handler)
    return configured

# Default shared logger instance for the application.
logger = setup_logger()
|
pyproject.toml
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[project]
name = "deepclaude"
version = "0.1.0"
description = "This is a project for combining DeepSeek and Claude into one api call. Full unleash the power of AI."
readme = "README.md"
requires-python = ">=3.11"
dependencies = [
    "aiohttp>=3.11.11",      # async HTTP client used by the API clients
    "colorlog>=6.9.0",       # colorized logging
    "fastapi>=0.115.8",      # web framework
    "python-dotenv>=1.0.1",  # .env loading
    "tiktoken>=0.8.0",       # token counting for usage stats
    "uvicorn>=0.34.0",       # ASGI server
]
|
uv.lock
ADDED
The diff for this file is too large to render.
See raw diff
|
|