dfa32412 committed on
Commit 021065a · verified · 1 Parent(s): 1b38468

Upload 2 files

Files changed (2)
  1. app.py +165 -157
  2. requirements.txt +3 -3
app.py CHANGED
@@ -1,157 +1,165 @@
- # encoding:utf-8
- from flask import Flask, request, Response, stream_with_context, jsonify
- import requests
- import json
- from flask_cors import CORS, cross_origin
- import time
-
- app = Flask(__name__)
- models = ["gpt-4o","gemini-pro","claude-sonnet-3.5","deepseek-v3","deepseek-r1","blackboxai-pro"]
-
- # URL of the upstream OpenAI-compatible API
- OPENAI_API_URL = "https://www.blackbox.ai/api/chat"
-
- @app.route('/hf/v1/chat/completions', methods=['POST'])
- @cross_origin(origin='*')  # allow requests from any origin
- def openai_chat_stream():
-     # read the JSON body sent by the client
-     data = request.json
-     null = None
-     true = True
-     false = False
-
-     model = data["model"]
-
-     if model not in models:
-         return "Unsupported model", 500
-
-     messages = data["messages"]
-     prompt = None
-     for m in messages:
-         if m["role"] == "system":
-             prompt = m["content"]
-             break
-
-     requestBody = {"messages":messages,"agentMode":{},"id":"NEX4Hei",
-                    "previewToken":null,"userId":null,"codeModelMode":true,
-                    "trendingAgentMode":{},"isMicMode":false,
-                    "userSystemPrompt":prompt,"maxTokens":10240,
-                    "playgroundTopP":null,"playgroundTemperature":null,
-                    "isChromeExt":false,"githubToken":"","clickedAnswer2":false,
-                    "clickedAnswer3":false,"clickedForceWebSearch":false,
-                    "visitFromDelta":false,"isMemoryEnabled":false,
-                    "mobileClient":false,
-                    "userSelectedModel":model,
-                    "validated":"00f37b34-a166-4efb-bce5-1312d87f2f94",
-                    "imageGenerationMode":false,"webSearchModePrompt":false,
-                    "deepSearchMode":false,"domains":null,"vscodeClient":false}
-     # requestBody = json.dumps(requestBody)
-
-     # build the request headers
-     headers = {
-         "Content-Type": "application/json",
-         "origin": "https://www.blackbox.ai",
-         "referer": "https://www.blackbox.ai",
-         "Accept": "*/*",
-         "Connection": "keep-alive",
-         # 'content-type': str(len(requestBody)),
-         "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36 Edg/131.0.0.0"
-     }
-
-     try:
-         enable_stream = False
-         if "stream" in data:
-             enable_stream = data["stream"]
-
-         # send the HTTP request to the upstream API
-         response = requests.post(
-             OPENAI_API_URL,
-             headers=headers,
-             json=requestBody,
-             stream=enable_stream  # enable streaming if requested
-         )
-
-         # check the response status code
-         if response.status_code != 200:
-             return jsonify(
-                 {'error': f"OpenAI API error: {response.status_code}, {response.text}"}), response.status_code
-
-         if enable_stream:
-             # use stream_with_context to handle the streamed response
-             def generate():
-                 size = 0
-                 for chunk in response.iter_lines():
-                     if chunk:
-                         text = chunk.decode("utf-8")
-                         body = {
-                             "id": "chatcmpl-5e9568c7-31d8-4a25-9eaf-ac5f872fb461",
-                             "object": "chat.completion.chunk",
-                             "created": time.time(),
-                             "model": data["model"],
-                             "choices": [
-                                 {
-                                     "index": 0,
-                                     "delta": {
-                                         "content": text,
-                                         "role": "assistant"
-                                     },
-                                     "finish_reason": null
-                                 }
-                             ],
-                             "usage": null
-                         }
-                         # re-emit the upstream chunk as an OpenAI-style SSE event
-                         yield "data:" + json.dumps(body, ensure_ascii=False) + "\n\n"
-                 yield "[DONE]"
-             return Response(stream_with_context(generate()), content_type='text/event-stream')
-         else:
-             text = ""
-             for chunk in response.iter_lines():
-                 if chunk:
-                     text += chunk.decode("utf-8") + "\n\n"
-
-             return jsonify({
-                 "id": "chatcmpl-5e9568c7-31d8-4a25-9eaf-ac5f872fb461",
-                 "object": "chat.completion.chunk",
-                 "created": time.time(),
-                 "model": data["model"],
-                 "choices": [
-                     {
-                         "index": 0,
-                         "message": {
-                             "content": text,
-                             "role": "assistant"
-                         },
-                         "finish_reason": null
-                     }
-                 ],
-                 "usage": null
-             })
-
-     except Exception as e:
-         return jsonify({'error': str(e)}), 500
-
- @app.route('/hf/v1/models', methods=['POST','GET'])
- @cross_origin(origin='*')  # allow requests from any origin
- def get_models():
-     data = []
-     for model in models:
-         data.append({"id": model,
-                      "object": "model"})
-
-     return jsonify({
-         "data": data
-     })
-
-
- if __name__ == '__main__':
-     app.run(host='0.0.0.0', port=7860)
+ from flask import Flask, request, Response, json
+ import requests
+ from uuid import uuid4
+ import time
+ import os
+
+ app = Flask(__name__)
+ AUTH_TOKEN = os.getenv('AUTH_TOKEN')
+
+ MODEL_MAPPING = {
+     "deepseek": "deepseek/deepseek-chat",
+     "gpt-4o-mini": "openai/gpt-4o-mini",
+     "gemini-flash-1.5": "google/gemini-flash-1.5",
+     "deepseek-reasoner": "deepseek-reasoner",
+     "minimax-01": "minimax/minimax-01"
+ }
+
+ def make_heck_request(question, session_id, messages, actual_model):
+     previous_question = previous_answer = None
+     if len(messages) >= 2:
+         for i in range(len(messages)-2, -1, -1):
+             if messages[i]["role"] == "user":
+                 previous_question = messages[i]["content"]
+                 if i+1 < len(messages) and messages[i+1]["role"] == "assistant":
+                     previous_answer = messages[i+1]["content"]
+                 break
+
+     payload = {
+         "model": actual_model,
+         "question": question,
+         "language": "Chinese",
+         "sessionId": session_id,
+         "previousQuestion": previous_question,
+         "previousAnswer": previous_answer
+     }
+
+     headers = {
+         "Content-Type": "application/json",
+         "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
+     }
+
+     return requests.post(
+         "https://gateway.aiapilab.com/api/ha/v1/chat",
+         json=payload,
+         headers=headers,
+         stream=True
+     )
+
+ def stream_response(question, session_id, messages, request_model, actual_model):
+     resp = make_heck_request(question, session_id, messages, actual_model)
+     is_answering = False
+
+     for line in resp.iter_lines():
+         if line:
+             line = line.decode('utf-8')
+             if not line.startswith('data: '):
+                 continue
+
+             content = line[6:].strip()
+
+             if content == "[ANSWER_START]":
+                 is_answering = True
+                 chunk = {
+                     "id": session_id,
+                     "object": "chat.completion.chunk",
+                     "created": int(time.time()),
+                     "model": request_model,
+                     "choices": [{
+                         "index": 0,
+                         "delta": {"role": "assistant"},
+                     }]
+                 }
+                 yield f"data: {json.dumps(chunk, ensure_ascii=False)}\n\n"
+                 continue
+
+             if content == "[ANSWER_DONE]":
+                 chunk = {
+                     "id": session_id,
+                     "object": "chat.completion.chunk",
+                     "created": int(time.time()),
+                     "model": request_model,
+                     "choices": [{
+                         "index": 0,
+                         "delta": {},
+                         "finish_reason": "stop"
+                     }]
+                 }
+                 yield f"data: {json.dumps(chunk, ensure_ascii=False)}\n\n"
+                 break
+
+             if is_answering and content and not content.startswith("[RELATE_Q"):
+                 chunk = {
+                     "id": session_id,
+                     "object": "chat.completion.chunk",
+                     "created": int(time.time()),
+                     "model": request_model,
+                     "choices": [{
+                         "index": 0,
+                         "delta": {"content": content},
+                     }]
+                 }
+                 yield f"data: {json.dumps(chunk, ensure_ascii=False)}\n\n"
+
+ def normal_response(question, session_id, messages, request_model, actual_model):
+     resp = make_heck_request(question, session_id, messages, actual_model)
+     full_content = []
+     is_answering = False
+
+     for line in resp.iter_lines():
+         if line:
+             line = line.decode('utf-8')
+             if line.startswith('data: '):
+                 content = line[6:].strip()
+                 if content == "[ANSWER_START]":
+                     is_answering = True
+                 elif content == "[ANSWER_DONE]":
+                     break
+                 elif is_answering:
+                     full_content.append(content)
+
+     response = {
+         "id": session_id,
+         "object": "chat.completion",
+         "created": int(time.time()),
+         "model": request_model,
+         "choices": [{
+             "index": 0,
+             "message": {
+                 "role": "assistant",
+                 "content": "".join(full_content)
+             },
+             "finish_reason": "stop"
+         }]
+     }
+     return response
+
+ @app.route("/hf/v1/chat/completions", methods=["POST"])
+ def chat_completions():
+     # validate the bearer token when AUTH_TOKEN is configured
+     if AUTH_TOKEN:
+         auth_header = request.headers.get('Authorization')
+         if not auth_header or auth_header != f"Bearer {AUTH_TOKEN}":
+             return {"error": "Unauthorized"}, 401
+
+     data = request.json
+     model = MODEL_MAPPING.get(data["model"])
+     if not model:
+         return {"error": "Unsupported Model"}, 400
+
+     question = next((msg["content"] for msg in reversed(data["messages"])
+                      if msg["role"] == "user"), None)
+     session_id = str(uuid4())
+
+     if data.get("stream"):
+         return Response(
+             stream_response(question, session_id, data["messages"],
+                             data["model"], model),
+             mimetype="text/event-stream"
+         )
+     else:
+         return normal_response(question, session_id, data["messages"],
+                                data["model"], model)
+
+ if __name__ == "__main__":
+     app.run(host='0.0.0.0', port=7860)
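
A minimal client sketch for the new endpoint, assuming the app runs locally on port 7860 with AUTH_TOKEN unset (if it is set, an Authorization: Bearer header is required); the model name must be one of the MODEL_MAPPING keys:

import requests

# hypothetical local test of the proxy above; not part of the commit
resp = requests.post(
    "http://localhost:7860/hf/v1/chat/completions",
    json={
        "model": "gpt-4o-mini",  # must be a key of MODEL_MAPPING
        "messages": [{"role": "user", "content": "Hello"}],
        "stream": False,
    },
    # headers={"Authorization": "Bearer <token>"},  # only when AUTH_TOKEN is set
)
print(resp.json()["choices"][0]["message"]["content"])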
requirements.txt CHANGED
@@ -1,3 +1,3 @@
- Flask==3.0.3
- requests
- flask_cors

+ flask
+ requests
+ uuid
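
And a hedged sketch of consuming the streaming path (stream=True) under the same assumptions: every SSE line the server emits is "data: " followed by an OpenAI-style chunk, and no [DONE] sentinel is sent, so each payload can be parsed as JSON directly:

import json
import requests

# hypothetical streaming client for the proxy above; not part of the commit
with requests.post(
    "http://localhost:7860/hf/v1/chat/completions",
    json={
        "model": "deepseek",
        "messages": [{"role": "user", "content": "Hi"}],
        "stream": True,
    },
    stream=True,
) as resp:
    for line in resp.iter_lines():
        if line and line.startswith(b"data: "):
            chunk = json.loads(line[6:])
            delta = chunk["choices"][0]["delta"]
            print(delta.get("content", ""), end="", flush=True)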