Update app.py
Browse files
app.py
CHANGED
@@ -7,9 +7,9 @@ from flask import Flask, request, jsonify, Response
|
|
7 |
# 1. 初始化Flask应用
|
8 |
app = Flask(__name__)
|
9 |
|
10 |
-
# 2. gpt-oss.com API的固定配置
|
11 |
GPT_OSS_API_URL = "https://api.gpt-oss.com/chatkit"
|
12 |
-
|
13 |
'authority': 'api.gpt-oss.com',
|
14 |
'accept': 'text/event-stream',
|
15 |
'content-type': 'application/json',
|
@@ -19,24 +19,45 @@ GPT_OSS_HEADERS = {
|
|
19 |
'x-selected-model': 'gpt-oss-120b',
|
20 |
}
|
21 |
|
22 |
-
# 3.
|
23 |
@app.route('/', methods=['GET'])
|
24 |
def root():
|
25 |
-
return jsonify({
|
|
|
|
|
|
|
|
|
|
|
26 |
|
27 |
-
#
|
28 |
def create_openai_chunk(content, model="gpt-oss-120b"):
|
29 |
return {
|
30 |
-
"id": f"chatcmpl-{str(uuid.uuid4())}",
|
31 |
-
"
|
32 |
-
"created": int(time.time()),
|
33 |
-
"model": model,
|
34 |
"choices": [{"index": 0, "delta": {"content": content}, "finish_reason": None}]
|
35 |
}
|
36 |
|
37 |
-
# 4. 核心API端点
|
38 |
@app.route('/v1/chat/completions', methods=['POST'])
|
39 |
def chat_completions_proxy():
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
40 |
try:
|
41 |
openai_request_data = request.json
|
42 |
user_prompt = next((m['content'] for m in reversed(openai_request_data.get("messages", [])) if m.get('role') == 'user'), None)
|
@@ -45,10 +66,8 @@ def chat_completions_proxy():
|
|
45 |
except Exception as e:
|
46 |
return jsonify({"error": f"请求格式无效: {e}"}), 400
|
47 |
|
48 |
-
request_headers = GPT_OSS_HEADERS.copy()
|
49 |
request_headers['x-show-reasoning'] = 'true' if stream_requested else 'false'
|
50 |
|
51 |
-
gpt_oss_cookies = {'user_id': str(uuid.uuid4())}
|
52 |
gpt_oss_payload = {
|
53 |
"op": "threads.create",
|
54 |
"params": {"input": {"text": user_prompt, "content": [{"type": "input_text", "text": user_prompt}]}}
|
@@ -57,7 +76,7 @@ def chat_completions_proxy():
|
|
57 |
def _internal_proxy_stream():
|
58 |
try:
|
59 |
with requests.post(
|
60 |
-
GPT_OSS_API_URL, headers=request_headers,
|
61 |
json=gpt_oss_payload, stream=True, timeout=120
|
62 |
) as response:
|
63 |
response.raise_for_status()
|
@@ -69,40 +88,34 @@ def chat_completions_proxy():
|
|
69 |
except requests.exceptions.RequestException as e:
|
70 |
raise IOError(f"与后端服务通信失败: {e}")
|
71 |
|
|
|
72 |
if stream_requested:
|
73 |
def stream_formatter():
|
|
|
|
|
74 |
try:
|
75 |
for gpt_oss_data in _internal_proxy_stream():
|
76 |
event_type = gpt_oss_data.get('type')
|
77 |
update_type = gpt_oss_data.get('update', {}).get('type')
|
78 |
|
79 |
-
# 关键逻辑: 每收到一条思考,就立即格式化并发送
|
80 |
if event_type == 'thread.item_updated' and update_type == 'cot.entry_added':
|
81 |
-
|
82 |
-
|
83 |
-
|
84 |
-
|
85 |
-
# 1. 发送包含代码块的chunk
|
86 |
-
thought_chunk = create_openai_chunk(formatted_thought_block)
|
87 |
-
yield f"data: {json.dumps(thought_chunk)}\n\n"
|
88 |
-
|
89 |
-
# 2. 发送一个包含换行符的chunk,以在视觉上分隔不同的思考块
|
90 |
-
newline_chunk = create_openai_chunk("\n\n")
|
91 |
-
yield f"data: {json.dumps(newline_chunk)}\n\n"
|
92 |
-
|
93 |
-
# 正常流式传输最终答案
|
94 |
if event_type == 'thread.item_updated' and update_type == 'assistant_message.content_part.text_delta':
|
95 |
-
|
96 |
-
|
97 |
-
|
|
|
|
|
|
|
|
|
98 |
|
99 |
yield "data: [DONE]\n\n"
|
100 |
except IOError as e:
|
101 |
yield f"data: {json.dumps({'error': str(e)})}\n\n"
|
102 |
-
|
103 |
return Response(stream_formatter(), mimetype='text/event-stream')
|
104 |
-
|
105 |
-
else: # 非流式请求逻辑保持不变
|
106 |
try:
|
107 |
full_response_content = ""
|
108 |
for gpt_oss_data in _internal_proxy_stream():
|
@@ -113,7 +126,7 @@ def chat_completions_proxy():
|
|
113 |
"id": f"chatcmpl-{str(uuid.uuid4())}", "object": "chat.completion", "created": int(time.time()),
|
114 |
"model": "gpt-oss-120b",
|
115 |
"choices": [{"index": 0, "message": {"role": "assistant", "content": full_response_content.strip()}, "finish_reason": "stop"}],
|
116 |
-
"usage": {
|
117 |
}
|
118 |
return jsonify(final_response)
|
119 |
except IOError as e:
|
|
|
7 |
# 1. 初始化Flask应用
|
8 |
app = Flask(__name__)
|
9 |
|
10 |
+
# 2. gpt-oss.com API的固定配置 (移除了Cookie)
|
11 |
GPT_OSS_API_URL = "https://api.gpt-oss.com/chatkit"
|
12 |
+
BASE_GPT_OSS_HEADERS = {
|
13 |
'authority': 'api.gpt-oss.com',
|
14 |
'accept': 'text/event-stream',
|
15 |
'content-type': 'application/json',
|
|
|
19 |
'x-selected-model': 'gpt-oss-120b',
|
20 |
}
|
21 |
|
22 |
+
# 3. Root route: serves usage instructions for the session-as-API-key auth scheme
@app.route('/', methods=['GET'])
def root():
    """Health/info endpoint describing how callers must authenticate.

    Returns a JSON document explaining that the Bearer token is a
    combined ``user_id|session_token`` credential.
    """
    usage_info = {
        "message": "欢迎使用 GPT-OSS to OpenAI 格式代理API",
        "status": "ok",
        "authentication_method": "使用动态API Key作为Session",
        "api_key_format": "在'Authorization: Bearer'后填入 '你的user_id|你的session令牌'",
    }
    return jsonify(usage_info)
|
31 |
|
32 |
+
# Helper: build one OpenAI-compatible streaming chunk (chat.completion.chunk)
def create_openai_chunk(content, model="gpt-oss-120b"):
    """Wrap *content* as a single OpenAI SSE delta chunk.

    Each chunk gets a fresh random id and the current timestamp; the
    payload carries one choice whose delta holds *content*.
    """
    choice = {
        "index": 0,
        "delta": {"content": content},
        "finish_reason": None,
    }
    return {
        "id": "chatcmpl-" + str(uuid.uuid4()),
        "object": "chat.completion.chunk",
        "created": int(time.time()),
        "model": model,
        "choices": [choice],
    }
|
39 |
|
40 |
+
# 4. 核心API端点 (包含新的认证逻辑)
|
41 |
@app.route('/v1/chat/completions', methods=['POST'])
|
42 |
def chat_completions_proxy():
|
43 |
+
# --- 关键改动:解析API Key作为Session ---
|
44 |
+
auth_header = request.headers.get('Authorization')
|
45 |
+
if not auth_header or not auth_header.startswith('Bearer '):
|
46 |
+
return jsonify({"error": "缺少或格式错误的Authorization Header。请提供 'Bearer 你的组合密钥'。"}), 401
|
47 |
+
|
48 |
+
combined_key = auth_header.split('Bearer ')[1]
|
49 |
+
if '|' not in combined_key:
|
50 |
+
return jsonify({"error": "API Key格式错误。正确格式为 '你的user_id|你的session令牌'。"}), 401
|
51 |
+
|
52 |
+
try:
|
53 |
+
user_id, session_token = combined_key.split('|', 1)
|
54 |
+
except ValueError:
|
55 |
+
return jsonify({"error": "API Key格式解析失败。请确保格式为 'user_id|session_token'。"}), 401
|
56 |
+
|
57 |
+
# --- 动态构建本次请求的Headers ---
|
58 |
+
request_headers = BASE_GPT_OSS_HEADERS.copy()
|
59 |
+
request_headers['cookie'] = f"user_id={user_id}; session={session_token}"
|
60 |
+
|
61 |
try:
|
62 |
openai_request_data = request.json
|
63 |
user_prompt = next((m['content'] for m in reversed(openai_request_data.get("messages", [])) if m.get('role') == 'user'), None)
|
|
|
66 |
except Exception as e:
|
67 |
return jsonify({"error": f"请求格式无效: {e}"}), 400
|
68 |
|
|
|
69 |
request_headers['x-show-reasoning'] = 'true' if stream_requested else 'false'
|
70 |
|
|
|
71 |
gpt_oss_payload = {
|
72 |
"op": "threads.create",
|
73 |
"params": {"input": {"text": user_prompt, "content": [{"type": "input_text", "text": user_prompt}]}}
|
|
|
76 |
def _internal_proxy_stream():
|
77 |
try:
|
78 |
with requests.post(
|
79 |
+
GPT_OSS_API_URL, headers=request_headers,
|
80 |
json=gpt_oss_payload, stream=True, timeout=120
|
81 |
) as response:
|
82 |
response.raise_for_status()
|
|
|
88 |
except requests.exceptions.RequestException as e:
|
89 |
raise IOError(f"与后端服务通信失败: {e}")
|
90 |
|
91 |
+
# 流式和非流式处理逻辑保持不变
|
92 |
if stream_requested:
|
93 |
def stream_formatter():
|
94 |
+
thinking_buffer = []
|
95 |
+
thinking_block_sent = False
|
96 |
try:
|
97 |
for gpt_oss_data in _internal_proxy_stream():
|
98 |
event_type = gpt_oss_data.get('type')
|
99 |
update_type = gpt_oss_data.get('update', {}).get('type')
|
100 |
|
|
|
101 |
if event_type == 'thread.item_updated' and update_type == 'cot.entry_added':
|
102 |
+
thinking_buffer.append(f"- {gpt_oss_data['update']['entry']['content']}")
|
103 |
+
continue
|
104 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
105 |
if event_type == 'thread.item_updated' and update_type == 'assistant_message.content_part.text_delta':
|
106 |
+
if not thinking_block_sent and thinking_buffer:
|
107 |
+
all_thoughts = "\n".join(thinking_buffer)
|
108 |
+
formatted_block = f"```markdown\n[思考过程]\n{all_thoughts}\n```\n\n"
|
109 |
+
yield f"data: {json.dumps(create_openai_chunk(formatted_block))}\n\n"
|
110 |
+
thinking_block_sent = True
|
111 |
+
|
112 |
+
yield f"data: {json.dumps(create_openai_chunk(gpt_oss_data['update'].get('delta', '')))}\n\n"
|
113 |
|
114 |
yield "data: [DONE]\n\n"
|
115 |
except IOError as e:
|
116 |
yield f"data: {json.dumps({'error': str(e)})}\n\n"
|
|
|
117 |
return Response(stream_formatter(), mimetype='text/event-stream')
|
118 |
+
else:
|
|
|
119 |
try:
|
120 |
full_response_content = ""
|
121 |
for gpt_oss_data in _internal_proxy_stream():
|
|
|
126 |
"id": f"chatcmpl-{str(uuid.uuid4())}", "object": "chat.completion", "created": int(time.time()),
|
127 |
"model": "gpt-oss-120b",
|
128 |
"choices": [{"index": 0, "message": {"role": "assistant", "content": full_response_content.strip()}, "finish_reason": "stop"}],
|
129 |
+
"usage": {}
|
130 |
}
|
131 |
return jsonify(final_response)
|
132 |
except IOError as e:
|