letterm committed on
Commit
d69f65d
·
verified ·
1 Parent(s): af5fed1

Update api_service.py

Browse files
Files changed (1) hide show
  1. api_service.py +9 -9
api_service.py CHANGED
@@ -90,7 +90,7 @@ class ApiService:
90
 
91
  if stream:
92
  # 流式响应
93
- chunk_response = self._create_stream_chunk(chunk_text, request_id)
94
  yield f"data: {json.dumps(chunk_response)}\n\n"
95
  else:
96
  # 非流式响应 - 等待完整内容
@@ -102,14 +102,14 @@ class ApiService:
102
 
103
  if stream:
104
  # 发送结束标记
105
- final_chunk = self._create_stream_end_chunk(request_id)
106
  yield f"data: {json.dumps(final_chunk)}\n\n"
107
  yield "data: [DONE]\n\n"
108
 
109
  logger.success(f"✅ 流式响应完成 [ID: {request_id[:8]}] [块数: {response_chunks}] [耗时: {duration:.2f}s]")
110
  else:
111
  # 返回完整响应
112
- response = self._create_complete_response(total_content, request_id)
113
  yield response
114
 
115
  logger.success(f"✅ 完整响应完成 [ID: {request_id[:8]}] [长度: {len(total_content)}] [耗时: {duration:.2f}s]")
@@ -118,13 +118,13 @@ class ApiService:
118
  logger.error(f"❌ 聊天请求处理失败 [ID: {request_id[:8]}]: {e}")
119
  yield self._create_error_response(f"服务器内部错误: {str(e)}", request_id)
120
 
121
- def _create_stream_chunk(self, content: str, request_id: str) -> Dict[str, Any]:
122
  """创建流式响应块"""
123
  return {
124
  "id": f"chatcmpl-{request_id}",
125
  "object": "chat.completion.chunk",
126
  "created": int(time.time()),
127
- "model": "gemini-2.0-flash",
128
  "choices": [{
129
  "index": 0,
130
  "delta": {"content": content},
@@ -132,13 +132,13 @@ class ApiService:
132
  }]
133
  }
134
 
135
- def _create_stream_end_chunk(self, request_id: str) -> Dict[str, Any]:
136
  """创建流式响应结束块"""
137
  return {
138
  "id": f"chatcmpl-{request_id}",
139
  "object": "chat.completion.chunk",
140
  "created": int(time.time()),
141
- "model": "gemini-2.0-flash",
142
  "choices": [{
143
  "index": 0,
144
  "delta": {},
@@ -146,13 +146,13 @@ class ApiService:
146
  }]
147
  }
148
 
149
- def _create_complete_response(self, content: str, request_id: str) -> Dict[str, Any]:
150
  """创建完整响应"""
151
  return {
152
  "id": f"chatcmpl-{request_id}",
153
  "object": "chat.completion",
154
  "created": int(time.time()),
155
- "model": "gemini-2.0-flash",
156
  "choices": [{
157
  "index": 0,
158
  "message": {
 
90
 
91
  if stream:
92
  # 流式响应
93
+ chunk_response = self._create_stream_chunk(chunk_text, request_id, model)
94
  yield f"data: {json.dumps(chunk_response)}\n\n"
95
  else:
96
  # 非流式响应 - 等待完整内容
 
102
 
103
  if stream:
104
  # 发送结束标记
105
+ final_chunk = self._create_stream_end_chunk(request_id, model)
106
  yield f"data: {json.dumps(final_chunk)}\n\n"
107
  yield "data: [DONE]\n\n"
108
 
109
  logger.success(f"✅ 流式响应完成 [ID: {request_id[:8]}] [块数: {response_chunks}] [耗时: {duration:.2f}s]")
110
  else:
111
  # 返回完整响应
112
+ response = self._create_complete_response(total_content, request_id, model)
113
  yield response
114
 
115
  logger.success(f"✅ 完整响应完成 [ID: {request_id[:8]}] [长度: {len(total_content)}] [耗时: {duration:.2f}s]")
 
118
  logger.error(f"❌ 聊天请求处理失败 [ID: {request_id[:8]}]: {e}")
119
  yield self._create_error_response(f"服务器内部错误: {str(e)}", request_id)
120
 
121
+ def _create_stream_chunk(self, content: str, request_id: str, model: str) -> Dict[str, Any]:
122
  """创建流式响应块"""
123
  return {
124
  "id": f"chatcmpl-{request_id}",
125
  "object": "chat.completion.chunk",
126
  "created": int(time.time()),
127
+ "model": model,
128
  "choices": [{
129
  "index": 0,
130
  "delta": {"content": content},
 
132
  }]
133
  }
134
 
135
+ def _create_stream_end_chunk(self, request_id: str, model: str) -> Dict[str, Any]:
136
  """创建流式响应结束块"""
137
  return {
138
  "id": f"chatcmpl-{request_id}",
139
  "object": "chat.completion.chunk",
140
  "created": int(time.time()),
141
+ "model": model,
142
  "choices": [{
143
  "index": 0,
144
  "delta": {},
 
146
  }]
147
  }
148
 
149
+ def _create_complete_response(self, content: str, request_id: str, model: str) -> Dict[str, Any]:
150
  """创建完整响应"""
151
  return {
152
  "id": f"chatcmpl-{request_id}",
153
  "object": "chat.completion",
154
  "created": int(time.time()),
155
+ "model": model,
156
  "choices": [{
157
  "index": 0,
158
  "message": {