Niansuh committed on
Commit
9f6bc04
·
verified ·
1 Parent(s): 0ea73f2

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +11 -2
api/utils.py CHANGED
@@ -79,6 +79,9 @@ async def process_streaming_response(request: ChatRequest):
79
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
80
  model_prefix = MODEL_PREFIXES.get(request.model, "")
81
 
 
 
 
82
  json_data = {
83
  "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
84
  "previewToken": None,
@@ -99,7 +102,7 @@ async def process_streaming_response(request: ChatRequest):
99
  "visitFromDelta": False,
100
  "mobileClient": False,
101
  "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
102
- "validated": validate.getHid()
103
  }
104
 
105
  async with httpx.AsyncClient() as client:
@@ -119,6 +122,7 @@ async def process_streaming_response(request: ChatRequest):
119
  if "https://www.blackbox.ai" in content:
120
  validate.getHid(True)
121
  content = "hid已刷新,重新对话即可\n"
 
122
  yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
123
  break
124
  if content.startswith("$@$v=undefined-rv1$@$"):
@@ -141,6 +145,9 @@ async def process_non_streaming_response(request: ChatRequest):
141
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
142
  model_prefix = MODEL_PREFIXES.get(request.model, "")
143
 
 
 
 
144
  json_data = {
145
  "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
146
  "previewToken": None,
@@ -161,7 +168,7 @@ async def process_non_streaming_response(request: ChatRequest):
161
  "visitFromDelta": False,
162
  "mobileClient": False,
163
  "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
164
- "validated": validate.getHid()
165
  }
166
  full_response = ""
167
  async with httpx.AsyncClient() as client:
@@ -182,6 +189,8 @@ async def process_non_streaming_response(request: ChatRequest):
182
  if "https://www.blackbox.ai" in full_response:
183
  validate.getHid(True)
184
  full_response = "hid已刷新,重新对话即可"
 
 
185
  if full_response.startswith("$@$v=undefined-rv1$@$"):
186
  full_response = full_response[21:]
187
  # Strip model prefix from full_response
 
79
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
80
  model_prefix = MODEL_PREFIXES.get(request.model, "")
81
 
82
+ hid = validate.getHid()
83
+ logger.info(f"Using hid: {hid} for model: {request.model}")
84
+
85
  json_data = {
86
  "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
87
  "previewToken": None,
 
102
  "visitFromDelta": False,
103
  "mobileClient": False,
104
  "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
105
+ "validated": hid
106
  }
107
 
108
  async with httpx.AsyncClient() as client:
 
122
  if "https://www.blackbox.ai" in content:
123
  validate.getHid(True)
124
  content = "hid已刷新,重新对话即可\n"
125
+ logger.info(f"hid refreshed due to content: {content}")
126
  yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
127
  break
128
  if content.startswith("$@$v=undefined-rv1$@$"):
 
145
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
146
  model_prefix = MODEL_PREFIXES.get(request.model, "")
147
 
148
+ hid = validate.getHid()
149
+ logger.info(f"Using hid: {hid} for model: {request.model}")
150
+
151
  json_data = {
152
  "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
153
  "previewToken": None,
 
168
  "visitFromDelta": False,
169
  "mobileClient": False,
170
  "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
171
+ "validated": hid
172
  }
173
  full_response = ""
174
  async with httpx.AsyncClient() as client:
 
189
  if "https://www.blackbox.ai" in full_response:
190
  validate.getHid(True)
191
  full_response = "hid已刷新,重新对话即可"
192
+ logger.info("hid refreshed due to response content")
193
+
194
  if full_response.startswith("$@$v=undefined-rv1$@$"):
195
  full_response = full_response[21:]
196
  # Strip model prefix from full_response