Niansuh committed on
Commit
e848ce6
·
verified ·
1 Parent(s): d233b76

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +113 -27
api/utils.py CHANGED
@@ -7,18 +7,34 @@ import uuid
7
  import httpx
8
  from api import validate
9
  from api.config import MODEL_MAPPING, headers, AGENT_MODE, TRENDING_AGENT_MODE
10
- from fastapi import Depends, security
11
- from fastapi.security import HTTPAuthorizationCredentials
12
 
13
  from api.config import APP_SECRET, BASE_URL
14
  from api.models import ChatRequest
 
15
  from api.logger import setup_logger
16
 
17
  logger = setup_logger(__name__)
18
 
 
 
 
19
  def create_chat_completion_data(
20
  content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
21
  ) -> Dict[str, Any]:
 
 
 
 
 
 
 
 
 
 
 
 
22
  return {
23
  "id": f"chatcmpl-{uuid.uuid4()}",
24
  "object": "chat.completion.chunk",
@@ -35,11 +51,32 @@ def create_chat_completion_data(
35
  }
36
 
37
  def verify_app_secret(credentials: HTTPAuthorizationCredentials = Depends(security)):
 
 
 
 
 
 
 
 
 
 
 
 
38
  if credentials.credentials != APP_SECRET:
39
- raise HTTPException(status_code=403, detail="Invalid APP_SECRET")
40
  return credentials.credentials
41
 
42
  def message_to_dict(message):
 
 
 
 
 
 
 
 
 
43
  if isinstance(message.content, str):
44
  return {"role": message.role, "content": message.content}
45
  elif isinstance(message.content, list) and len(message.content) == 2:
@@ -56,9 +93,19 @@ def message_to_dict(message):
56
  return {"role": message.role, "content": message.content}
57
 
58
  async def process_streaming_response(request: ChatRequest):
 
 
 
 
 
 
 
 
 
59
  agent_mode = AGENT_MODE.get(request.model, {})
60
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
61
 
 
62
  logger.info(
63
  f"Streaming request for model: '{request.model}', "
64
  f"agent mode: {agent_mode}, trending agent mode: {trending_agent_mode}"
@@ -97,23 +144,43 @@ async def process_streaming_response(request: ChatRequest):
97
  timeout=100,
98
  ) as response:
99
  response.raise_for_status()
 
100
  async for line in response.aiter_lines():
101
- timestamp = int(datetime.now().timestamp())
102
- yield f"data: {json.dumps(create_chat_completion_data(line, request.model, timestamp))}\n\n"
103
-
 
 
 
 
 
 
 
 
 
104
  yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
105
  yield "data: [DONE]\n\n"
106
  except httpx.HTTPStatusError as e:
107
  logger.error(f"HTTP error occurred: {e}")
108
- raise HTTPException(status_code=e.response.status_code, detail=str(e))
109
  except httpx.RequestError as e:
110
  logger.error(f"Error occurred during request: {e}")
111
- raise HTTPException(status_code=500, detail=str(e))
112
 
113
  async def process_non_streaming_response(request: ChatRequest):
 
 
 
 
 
 
 
 
 
114
  agent_mode = AGENT_MODE.get(request.model, {})
115
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
116
 
 
117
  logger.info(
118
  f"Non-streaming request for model: '{request.model}', "
119
  f"agent mode: {agent_mode}, trending agent mode: {trending_agent_mode}"
@@ -144,23 +211,42 @@ async def process_non_streaming_response(request: ChatRequest):
144
 
145
  full_response = ""
146
  async with httpx.AsyncClient() as client:
147
- async with client.stream(
148
- method="POST", url=f"{BASE_URL}/api/chat", headers=headers, json=json_data
149
- ) as response:
150
- async for chunk in response.aiter_text():
151
- full_response += chunk
152
-
153
- return {
154
- "id": f"chatcmpl-{uuid.uuid4()}",
155
- "object": "chat.completion",
156
- "created": int(datetime.now().timestamp()),
157
- "model": request.model,
158
- "choices": [
159
- {
160
- "index": 0,
161
- "message": {"role": "assistant", "content": full_response},
162
- "finish_reason": "stop",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
163
  }
164
- ],
165
- "usage": None,
166
- }
 
 
 
 
7
  import httpx
8
  from api import validate
9
  from api.config import MODEL_MAPPING, headers, AGENT_MODE, TRENDING_AGENT_MODE
10
+ from fastapi import Depends, HTTPException as FastAPIHTTPException
11
+ from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
12
 
13
  from api.config import APP_SECRET, BASE_URL
14
  from api.models import ChatRequest
15
+
16
  from api.logger import setup_logger
17
 
18
  logger = setup_logger(__name__)
19
 
20
+ # Initialize HTTPBearer for security dependency
21
+ security = HTTPBearer()
22
+
23
  def create_chat_completion_data(
24
  content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
25
  ) -> Dict[str, Any]:
26
+ """
27
+ Create a dictionary representing a chat completion chunk.
28
+
29
+ Args:
30
+ content (str): The content of the message.
31
+ model (str): The model used for the chat.
32
+ timestamp (int): The timestamp of the creation.
33
+ finish_reason (Optional[str], optional): The reason for finishing. Defaults to None.
34
+
35
+ Returns:
36
+ Dict[str, Any]: A dictionary representing the chat completion chunk.
37
+ """
38
  return {
39
  "id": f"chatcmpl-{uuid.uuid4()}",
40
  "object": "chat.completion.chunk",
 
51
  }
52
 
53
  def verify_app_secret(credentials: HTTPAuthorizationCredentials = Depends(security)):
54
+ """
55
+ Verify the application secret from the HTTP authorization credentials.
56
+
57
+ Args:
58
+ credentials (HTTPAuthorizationCredentials, optional): The HTTP authorization credentials. Defaults to Depends(security).
59
+
60
+ Raises:
61
+ HTTPException: If the APP_SECRET does not match.
62
+
63
+ Returns:
64
+ str: The verified credentials.
65
+ """
66
  if credentials.credentials != APP_SECRET:
67
+ raise FastAPIHTTPException(status_code=403, detail="Invalid APP_SECRET")
68
  return credentials.credentials
69
 
70
  def message_to_dict(message):
71
+ """
72
+ Convert a message object to a dictionary.
73
+
74
+ Args:
75
+ message: The message object to convert.
76
+
77
+ Returns:
78
+ Dict[str, Any]: The dictionary representation of the message.
79
+ """
80
  if isinstance(message.content, str):
81
  return {"role": message.role, "content": message.content}
82
  elif isinstance(message.content, list) and len(message.content) == 2:
 
93
  return {"role": message.role, "content": message.content}
94
 
95
  async def process_streaming_response(request: ChatRequest):
96
+ """
97
+ Process a streaming response from the chat API.
98
+
99
+ Args:
100
+ request (ChatRequest): The chat request containing all necessary information.
101
+
102
+ Yields:
103
+ str: The streaming data chunks formatted as server-sent events.
104
+ """
105
  agent_mode = AGENT_MODE.get(request.model, {})
106
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
107
 
108
+ # Log reduced information
109
  logger.info(
110
  f"Streaming request for model: '{request.model}', "
111
  f"agent mode: {agent_mode}, trending agent mode: {trending_agent_mode}"
 
144
  timeout=100,
145
  ) as response:
146
  response.raise_for_status()
147
+ timestamp = int(datetime.now().timestamp())
148
  async for line in response.aiter_lines():
149
+ if line:
150
+ content = line + "\n"
151
+ if "https://www.blackbox.ai" in content:
152
+ validate.getHid(True)
153
+ content = "Hid has been refreshed; feel free to restart the conversation.\n"
154
+ yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
155
+ break
156
+ # Remove the specific pattern without affecting markdown
157
+ content = content.replace("$@$v=undefined-rv1$@$", "")
158
+ yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
159
+
160
+ # Indicate the end of the stream
161
  yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
162
  yield "data: [DONE]\n\n"
163
  except httpx.HTTPStatusError as e:
164
  logger.error(f"HTTP error occurred: {e}")
165
+ raise FastAPIHTTPException(status_code=e.response.status_code, detail=str(e))
166
  except httpx.RequestError as e:
167
  logger.error(f"Error occurred during request: {e}")
168
+ raise FastAPIHTTPException(status_code=500, detail=str(e))
169
 
170
  async def process_non_streaming_response(request: ChatRequest):
171
+ """
172
+ Process a non-streaming response from the chat API.
173
+
174
+ Args:
175
+ request (ChatRequest): The chat request containing all necessary information.
176
+
177
+ Returns:
178
+ Dict[str, Any]: The full response from the chat API formatted appropriately.
179
+ """
180
  agent_mode = AGENT_MODE.get(request.model, {})
181
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
182
 
183
+ # Log reduced information
184
  logger.info(
185
  f"Non-streaming request for model: '{request.model}', "
186
  f"agent mode: {agent_mode}, trending agent mode: {trending_agent_mode}"
 
211
 
212
  full_response = ""
213
  async with httpx.AsyncClient() as client:
214
+ try:
215
+ async with client.stream(
216
+ method="POST",
217
+ url=f"{BASE_URL}/api/chat",
218
+ headers=headers,
219
+ json=json_data,
220
+ timeout=100,
221
+ ) as response:
222
+ response.raise_for_status()
223
+ async for chunk in response.aiter_text():
224
+ full_response += chunk
225
+
226
+ if "https://www.blackbox.ai" in full_response:
227
+ validate.getHid(True)
228
+ full_response = "Hid has been refreshed; feel free to restart the conversation."
229
+
230
+ # Remove the specific pattern without affecting markdown
231
+ full_response = full_response.replace("$@$v=undefined-rv1$@$", "")
232
+
233
+ return {
234
+ "id": f"chatcmpl-{uuid.uuid4()}",
235
+ "object": "chat.completion",
236
+ "created": int(datetime.now().timestamp()),
237
+ "model": request.model,
238
+ "choices": [
239
+ {
240
+ "index": 0,
241
+ "message": {"role": "assistant", "content": full_response},
242
+ "finish_reason": "stop",
243
+ }
244
+ ],
245
+ "usage": None,
246
  }
247
+ except httpx.HTTPStatusError as e:
248
+ logger.error(f"HTTP error occurred: {e}")
249
+ raise FastAPIHTTPException(status_code=e.response.status_code, detail=str(e))
250
+ except httpx.RequestError as e:
251
+ logger.error(f"Error occurred during request: {e}")
252
+ raise FastAPIHTTPException(status_code=500, detail=str(e))