smgc commited on
Commit
20f980f
·
verified ·
1 Parent(s): 535a218

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +44 -231
app.py CHANGED
@@ -14,14 +14,15 @@ app = Flask(__name__)
14
  # 配置日志
15
  class RequestFormatter(logging.Formatter):
16
  def format(self, record):
17
- if request.method == 'POST':
18
  record.url = request.url
19
  record.remote_addr = request.remote_addr
 
20
  return super().format(record)
21
  return None
22
 
23
  formatter = RequestFormatter(
24
- '%(remote_addr)s - - [%(asctime)s] - %(message)s',
25
  datefmt='%d/%b/%Y %H:%M:%S'
26
  )
27
 
@@ -60,132 +61,47 @@ MODEL_MAPPING = {
60
  }
61
  }
62
 
63
- SYSTEM_ASSISTANT = """作为 Stable Diffusion Prompt 提示词专家,您将从关键词中创建提示,通常来自 Danbooru 等数据库。
64
- 提示通常描述图像,使用常见词汇,按重要性排列,并用逗号分隔。避免使用"-"或".",但可以接受空格和自然语言。避免词汇重复。
 
 
 
 
65
 
66
- 为了强调关键词,请将其放在括号中以增加其权重。例如,"(flowers)"将'flowers'的权重增加1.1倍,而"(((flowers)))"将其增加1.331倍。使用"(flowers:1.5)"将'flowers'的权重增加1.5倍。只为重要的标签增加权重。
67
-
68
- 提示包括三个部分:**前缀**(质量标签+风格词+效果器)+ **主题**(图像的主要焦点)+ **场景**(背景、环境)。
69
-
70
- * 前缀影响图像质量。像"masterpiece"、"best quality"、"4k"这样的标签可以提高图像的细节。像"illustration"、"lensflare"这样的风格词定义图像的风格。像"bestlighting"、"lensflare"、"depthoffield"这样的效果器会影响光照和深度。
71
-
72
- * 主题是图像的主要焦点,如角色或场景。对主题进行详细描述可以确保图像丰富而详细。增加主题的权重以增强其清晰度。对于角色,描述面部、头发、身体、服装、姿势等特征。
73
-
74
- * 场景描述环境。没有场景,图像的背景是平淡的,主题显得过大。某些主题本身包含场景(例如建筑物、风景)。像"花草草地"、"阳光"、"河流"这样的环境词可以丰富场景。你的任务是设计图像生成的提示。请按照以下步骤进行操作:
75
-
76
- 1. 我会发送给您一个图像场景。需要你生成详细的图像描述
77
- 2. 图像描述必须是英文,输出为Positive Prompt。
78
-
79
- 示例:
80
-
81
- 我发送:二战时期的护士。
82
- 您回复只回复:
83
- A WWII-era nurse in a German uniform, holding a wine bottle and stethoscope, sitting at a table in white attire, with a table in the background, masterpiece, best quality, 4k, illustration style, best lighting, depth of field, detailed character, detailed environment.
84
- """
85
-
86
- RATIO_MAP = {
87
- "1:1": "1024x1024",
88
- "1:2": "1024x2048",
89
- "3:2": "1536x1024",
90
- "4:3": "1536x2048",
91
- "16:9": "2048x1152",
92
- "9:16": "1152x2048"
93
- }
94
-
95
- def get_random_token(auth_header):
96
- if not auth_header:
97
- return None
98
- if auth_header.startswith('Bearer '):
99
- auth_header = auth_header[7:]
100
- tokens = [token.strip() for token in auth_header.split(',') if token.strip()]
101
- if not tokens:
102
- return None
103
- return random.choice(tokens)
104
-
105
- def translate_and_enhance_prompt(prompt, auth_token):
106
- translate_url = 'https://api.siliconflow.cn/v1/chat/completions'
107
- translate_body = {
108
- 'model': 'Qwen/Qwen2-72B-Instruct',
109
- 'messages': [
110
- {'role': 'system', 'content': SYSTEM_ASSISTANT},
111
- {'role': 'user', 'content': prompt}
112
  ]
113
- }
114
- headers = {
115
- 'Content-Type': 'application/json',
116
- 'Authorization': f'Bearer {auth_token}'
117
- }
118
-
119
- response = requests.post(translate_url, headers=headers, json=translate_body, timeout=30)
120
- response.raise_for_status()
121
- result = response.json()
122
- return result['choices'][0]['message']['content']
123
-
124
- def extract_params_from_prompt(prompt):
125
- size_match = re.search(r'-s\s+(\S+)', prompt)
126
- original_match = re.search(r'-o', prompt)
127
-
128
- if size_match:
129
- size = size_match.group(1)
130
- clean_prompt = re.sub(r'-s\s+\S+', '', prompt).strip()
131
- else:
132
- size = "16:9"
133
- clean_prompt = prompt
134
-
135
- use_original = bool(original_match)
136
- if use_original:
137
- clean_prompt = re.sub(r'-o', '', clean_prompt).strip()
138
-
139
- image_size = RATIO_MAP.get(size, RATIO_MAP["16:9"])
140
- return image_size, clean_prompt, use_original, size
141
-
142
- @app.route('/')
143
- def index():
144
- usage = """
145
- <html>
146
- <head>
147
- <title>Text-to-Image API with SiliconFlow</title>
148
- <style>
149
- body { font-family: Arial, sans-serif; line-height: 1.6; padding: 20px; max-width: 800px; margin: 0 auto; }
150
- h1 { color: #333; }
151
- h2 { color: #666; }
152
- pre { background-color: #f4f4f4; padding: 10px; border-radius: 5px; }
153
- code { font-family: Consolas, monospace; }
154
- </style>
155
- </head>
156
- <body>
157
- <h1>Welcome to the Text-to-Image API with SiliconFlow!</h1>
158
 
159
- <h2>Usage:</h2>
160
- <ol>
161
- <li>Send a POST request to <code>/ai/v1/chat/completions</code></li>
162
- <li>Include your prompt in the 'content' field of the last message</li>
163
- <li>Optional parameters:
164
- <ul>
165
- <li><code>-s &lt;ratio&gt;</code>: Set image size ratio (e.g., -s 1:1, -s 16:9)</li>
166
- <li><code>-o</code>: Use original prompt without enhancement</li>
167
- </ul>
168
- </li>
169
- </ol>
170
-
171
- <h2>Example Request:</h2>
172
- <pre><code>
173
- {
174
- "model": "flux",
175
- "messages": [
176
- {
177
- "role": "user",
178
- "content": "A beautiful landscape -s 16:9"
179
- }
180
- ]
181
- }
182
- </code></pre>
183
-
184
- <p>For more details, please refer to the API documentation.</p>
185
- </body>
186
- </html>
187
- """
188
- return usage, 200
189
 
190
  @app.route('/ai/v1/chat/completions', methods=['POST'])
191
  def handle_request():
@@ -254,7 +170,7 @@ def handle_request():
254
  params.append("-o")
255
  params_str = " ".join(params) if params else "no params"
256
 
257
- app.logger.info(f"Status: 200 - Token: {random_token} - Model: {mapped_model} - Params: {params_str} - Image URL: {image_url}")
258
 
259
  if stream:
260
  return stream_response(unique_id, image_data, clean_prompt, enhanced_prompt, image_size, current_timestamp, model, system_fingerprint, use_original)
@@ -264,111 +180,8 @@ def handle_request():
264
  app.logger.error(f"Error: {str(e)}")
265
  return jsonify({"error": f"Internal Server Error: {str(e)}"}), 500
266
 
267
- @app.route('/ai/v1/models', methods=['GET'])
268
- def get_models():
269
- models_list = [
270
- {
271
- "id": key,
272
- "object": "model",
273
- "owned_by": value["provider"],
274
- "mapping": value["mapping"]
275
- }
276
- for key, value in MODEL_MAPPING.items()
277
- ]
278
-
279
- response = {
280
- "object": "list",
281
- "data": models_list
282
- }
283
-
284
- return jsonify(response)
285
-
286
- def stream_response(unique_id, image_data, original_prompt, translated_prompt, size, created, model, system_fingerprint, use_original):
287
- return Response(stream_with_context(generate_stream(unique_id, image_data, original_prompt, translated_prompt, size, created, model, system_fingerprint, use_original)), content_type='text/event-stream')
288
-
289
- def generate_stream(unique_id, image_data, original_prompt, translated_prompt, size, created, model, system_fingerprint, use_original):
290
- chunks = [
291
- f"原始提示词:\n{original_prompt}\n",
292
- ]
293
-
294
- if not use_original:
295
- chunks.append(f"翻译后的提示词:\n{translated_prompt}\n")
296
-
297
- chunks.extend([
298
- f"图像规格:{size}\n",
299
- "正在根据提示词生成图像...\n",
300
- "图像正在处理中...\n",
301
- "即将完成...\n",
302
- f"生成成功!\n图像生成完毕,以下是结果:\n\n![生成的图像]({image_data['data'][0]['url']})"
303
- ])
304
-
305
- for i, chunk in enumerate(chunks):
306
- json_chunk = json.dumps({
307
- "id": unique_id,
308
- "object": "chat.completion.chunk",
309
- "created": created,
310
- "model": model,
311
- "system_fingerprint": system_fingerprint,
312
- "choices": [{
313
- "index": 0,
314
- "delta": {"content": chunk},
315
- "logprobs": None,
316
- "finish_reason": None
317
- }]
318
- })
319
- yield f"data: {json_chunk}\n\n"
320
- time.sleep(0.5) # 模拟生成时���
321
-
322
- final_chunk = json.dumps({
323
- "id": unique_id,
324
- "object": "chat.completion.chunk",
325
- "created": created,
326
- "model": model,
327
- "system_fingerprint": system_fingerprint,
328
- "choices": [{
329
- "index": 0,
330
- "delta": {},
331
- "logprobs": None,
332
- "finish_reason": "stop"
333
- }]
334
- })
335
- yield f"data: {final_chunk}\n\n"
336
-
337
- def non_stream_response(unique_id, image_data, original_prompt, translated_prompt, size, created, model, system_fingerprint, use_original):
338
- content = f"原始提示词:{original_prompt}\n"
339
-
340
- if not use_original:
341
- content += f"翻译后的提示词:{translated_prompt}\n"
342
-
343
- content += (
344
- f"图像规格:{size}\n"
345
- f"图像生成成功!\n"
346
- f"以下是结果:\n\n"
347
- f"![生成的图像]({image_data['data'][0]['url']})"
348
- )
349
-
350
- response = {
351
- 'id': unique_id,
352
- 'object': "chat.completion",
353
- 'created': created,
354
- 'model': model,
355
- 'system_fingerprint': system_fingerprint,
356
- 'choices': [{
357
- 'index': 0,
358
- 'message': {
359
- 'role': "assistant",
360
- 'content': content
361
- },
362
- 'finish_reason': "stop"
363
- }],
364
- 'usage': {
365
- 'prompt_tokens': len(original_prompt),
366
- 'completion_tokens': len(content),
367
- 'total_tokens': len(original_prompt) + len(content)
368
- }
369
- }
370
-
371
- return jsonify(response)
372
 
373
  if __name__ == '__main__':
374
  app.run(host='0.0.0.0', port=8000)
 
14
  # 配置日志
15
  class RequestFormatter(logging.Formatter):
16
  def format(self, record):
17
+ if request.method in ['POST', 'GET']: # 记录 POST 和 GET 请求
18
  record.url = request.url
19
  record.remote_addr = request.remote_addr
20
+ record.token = request.headers.get('Authorization', 'No Token')
21
  return super().format(record)
22
  return None
23
 
24
  formatter = RequestFormatter(
25
+ '%(remote_addr)s - - [%(asctime)s] - Token: %(token)s - %(message)s',
26
  datefmt='%d/%b/%Y %H:%M:%S'
27
  )
28
 
 
61
  }
62
  }
63
 
64
+ # 模拟身份验证函数
65
+ def getAuthCookie(req):
66
+ auth_cookie = req.headers.get('Authorization')
67
+ if auth_cookie and auth_cookie.startswith('Bearer '):
68
+ return auth_cookie
69
+ return None
70
 
71
+ @app.route('/ai/v1/models', methods=['GET'])
72
+ def get_models():
73
+ try:
74
+ # 验证身份
75
+ auth_cookie = getAuthCookie(request)
76
+ if not auth_cookie:
77
+ app.logger.info(f'GET /ai/v1/models - 401 Unauthorized')
78
+ return jsonify({"error": "Unauthorized"}), 401
79
+
80
+ # 返回模型列表
81
+ models_list = [
82
+ {
83
+ "id": model_id,
84
+ "object": "model",
85
+ "created": int(time.time()),
86
+ "owned_by": info["provider"],
87
+ "permission": [],
88
+ "root": model_id,
89
+ "parent": None
90
+ }
91
+ for model_id, info in MODEL_MAPPING.items()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
92
  ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
93
 
94
+ # 记录日志
95
+ app.logger.info(f'GET /ai/v1/models - 200 OK')
96
+
97
+ return jsonify({
98
+ "object": "list",
99
+ "data": models_list
100
+ })
101
+
102
+ except Exception as error:
103
+ app.logger.error(f"Error: {str(error)}")
104
+ return jsonify({"error": "Authentication failed", "details": str(error)}), 401
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
105
 
106
  @app.route('/ai/v1/chat/completions', methods=['POST'])
107
  def handle_request():
 
170
  params.append("-o")
171
  params_str = " ".join(params) if params else "no params"
172
 
173
+ app.logger.info(f'POST /ai/v1/chat/completions - Status: 200 - Token: {random_token} - Model: {mapped_model} - Params: {params_str} - Image URL: {image_url}')
174
 
175
  if stream:
176
  return stream_response(unique_id, image_data, clean_prompt, enhanced_prompt, image_size, current_timestamp, model, system_fingerprint, use_original)
 
180
  app.logger.error(f"Error: {str(e)}")
181
  return jsonify({"error": f"Internal Server Error: {str(e)}"}), 500
182
 
183
+ # 其余代码保持不变
184
+ # 例如 stream_response, generate_stream, non_stream_response 等函数
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
185
 
186
  if __name__ == '__main__':
187
  app.run(host='0.0.0.0', port=8000)