askai / app.py
from flask import Flask, request, jsonify, Response
import requests
import uuid
import json
import time
import os
import re
import logging
# Configure logging output (raise the level to DEBUG for more verbose logs)
logging.basicConfig(level=logging.INFO)
_COOKIES = ""
API_KEY = os.getenv("API_KEY", "linux.do")
if not API_KEY:
logging.warning("API_KEY 未设置!")
app = Flask(__name__)
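# Global API-key gate: every incoming request must carry
# "Authorization: Bearer <API_KEY>", otherwise a 403 is returned
# before any route handler runs.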
@app.before_request
def check_api_key():
auth = request.headers.get("Authorization", "")
if auth != f"Bearer {API_KEY}":
logging.warning("未经授权的访问尝试,传入的 key: %s", auth)
return jsonify({"success": False, "message": "Unauthorized: Invalid API key"}), 403
@app.route('/v1/models', methods=['GET'])
def get_models():
logging.info("收到 /v1/models 请求")
headers = {"Content-Type": "application/json", "Cookie": _COOKIES}
response = requests.get('https://chat.akash.network/api/models', headers=headers)
models_data = response.json()
current_timestamp = int(time.time())
converted_data = {
"object": "list",
"data": [
{
"id": model["id"],
"object": "model",
"created": current_timestamp,
"owned_by": "openai" if "Meta" in model["id"] else "third_party",
"permissions": [],
"root": model["id"],
"parent": None,
"capabilities": {
"temperature": model.get("temperature"),
"top_p": model.get("top_p")
},
"name": model.get("name"),
"description": model.get("description"),
"available": model.get("available")
}
for model in models_data.get("models", [])
]
}
logging.info("返回 /v1/models 响应: %s", json.dumps(converted_data, ensure_ascii=False))
return jsonify(converted_data)
def build_chunk(chat_id, model, token=None, finish_reason=None):
"""
构造单个 chunk 数据,符合 OpenAI API 流式响应格式。
"""
chunk = {
"id": f"chatcmpl-{chat_id}",
"object": "chat.completion.chunk",
"created": int(time.time()),
"model": model,
"choices": [{
"delta": {"content": token} if token is not None else {},
"index": 0,
"finish_reason": finish_reason
}]
}
return chunk
def generate_stream(akash_response, chat_id, model):
"""
解析 Akash 接口的流式响应数据,并生成符合 OpenAI API 格式的 chunk 数据。
"""
for line in akash_response.iter_lines():
if not line:
continue
try:
line_str = line.decode('utf-8').strip()
parts = line_str.split(':', 1)
if len(parts) != 2:
logging.error("流数据格式异常: %s", line_str)
continue
msg_type, msg_data = parts
if msg_type == '0':
token = msg_data.strip()
if token.startswith('"') and token.endswith('"'):
token = token[1:-1].replace('\\"', '"')
token = token.replace("\\n", "\n")
chunk = build_chunk(chat_id, model, token=token, finish_reason=None)
logging.info("流式 chunk: %s", json.dumps(chunk, ensure_ascii=False))
yield f"data: {json.dumps(chunk, ensure_ascii=False)}\n\n"
elif msg_type in ['e', 'd']:
chunk = build_chunk(chat_id, model, finish_reason="stop")
logging.info("流式结束 chunk: %s", json.dumps(chunk, ensure_ascii=False))
yield f"data: {json.dumps(chunk, ensure_ascii=False)}\n\n"
yield "data: [DONE]\n\n"
break
except Exception as ex:
logging.error("处理流数据时出错: %s", ex)
continue
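# /v1/chat/completions: accept an OpenAI-style request body, forward it to the
# Akash chat endpoint, and either proxy the stream back as SSE chunks or, for the
# AkashGen image model, poll the image-status endpoint and return a Markdown image link.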
@app.route('/v1/chat/completions', methods=['POST'])
def chat_completions():
try:
data = request.get_json()
logging.info("收到 /v1/chat/completions 请求: %s", json.dumps(data, ensure_ascii=False))
chat_id = str(uuid.uuid4()).replace('-', '')[:16]
model = data.get('model', "DeepSeek-R1")
akash_data = {
"id": chat_id,
"messages": data.get('messages', []),
"model": model,
"system": data.get('system_message', "You are a helpful assistant."),
"temperature": data.get('temperature', 0.6),
"topP": data.get('top_p', 0.95)
}
headers = {"Content-Type": "application/json", "Cookie": _COOKIES}
        # Streaming is enabled by default, but it is disabled for the AkashGen model
stream_flag = data.get('stream', True)
if model == "AkashGen":
stream_flag = False
akash_response = requests.post(
'https://chat.akash.network/api/chat',
json=akash_data,
headers=headers,
stream=stream_flag
)
logging.info("Akash API 响应状态: %s", akash_response.status_code)
if stream_flag:
return Response(
generate_stream(akash_response, chat_id, model),
mimetype='text/event-stream',
headers={
'Cache-Control': 'no-cache',
'Connection': 'keep-alive'
}
)
else:
            # Non-streaming response handling
if model != "AkashGen":
                text_matches = re.findall(r'0:"(.*?)"', akash_response.text)
                # Join the extracted text parts and undo the JSON-style escaping,
                # mirroring what the streaming path does for each token
                parsed_text = "".join(text_matches).replace('\\"', '"').replace("\\n", "\n")
response_payload = {
"object": "chat.completion",
"created": int(time.time() * 1000),
"model": model,
"choices": [{
"index": 0,
"message": {"role": "assistant", "content": parsed_text},
"finish_reason": "stop"
}]
}
logging.info("非流式响应 payload: %s", json.dumps(response_payload, ensure_ascii=False))
return Response(
json.dumps(response_payload, ensure_ascii=False),
status=akash_response.status_code,
mimetype='application/json'
)
else:
match = re.search(r"jobId='([^']+)'", akash_response.text)
if match:
job_id = match.group(1)
logging.info("AkashGen jobId: %s", job_id)
                    # Poll the image generation status
while True:
try:
img_response = requests.get(
f'https://chat.akash.network/api/image-status?ids={job_id}',
headers=headers
)
img_data = img_response.json()
if img_data and img_data[0]["status"] == "completed":
response_payload = {
"object": "chat.completion",
"created": int(time.time() * 1000),
"model": model,
"choices": [{
"index": 0,
"message": {
"role": "assistant",
"content": f"根据您的描述,这里是一张生成的图片:\n\n![生成的图片]({img_data[0]['result']})"
},
"finish_reason": "stop"
}]
}
logging.info("AkashGen 完成后的 payload: %s", json.dumps(response_payload, ensure_ascii=False))
return Response(
json.dumps(response_payload, ensure_ascii=False),
status=akash_response.status_code,
mimetype='application/json'
)
else:
logging.info("图片生成中,jobId: %s", job_id)
except Exception as e:
logging.error("请求图片状态异常: %s", e)
time.sleep(5)
else:
logging.error("未能解析到 jobId")
return jsonify({"error": "当前官方服务异常"}), 500
except Exception as e:
logging.exception("chat_completions 处理过程中出现异常:")
return jsonify({"error": str(e)}), 500
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5200)
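# Example client usage (a sketch: assumes the server is running locally on the
# port configured above and that API_KEY is left at its default "linux.do"):
#
#   import requests
#
#   headers = {"Authorization": "Bearer linux.do", "Content-Type": "application/json"}
#   body = {
#       "model": "DeepSeek-R1",
#       "messages": [{"role": "user", "content": "Hello"}],
#       "stream": False,
#   }
#   resp = requests.post("http://localhost:5200/v1/chat/completions", json=body, headers=headers)
#   print(resp.json()["choices"][0]["message"]["content"])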