Niansuh committed
Commit c0af3b0 · verified · 1 Parent(s): 61affc1

Update api/utils.py

Files changed (1)
  1. api/utils.py +58 -17
api/utils.py CHANGED
@@ -10,21 +10,20 @@ import httpx
 from fastapi import HTTPException
 from api.config import (
     MODEL_MAPPING,
-    common_headers,
+    get_headers_api_chat,
+    get_headers_chat,
     BASE_URL,
     AGENT_MODE,
     TRENDING_AGENT_MODE,
+    MODEL_PREFIXES,
+    API_ENDPOINT,
+    generate_id
 )
 from api.models import ChatRequest
 from api.logger import setup_logger

 logger = setup_logger(__name__)

-# Helper function to create a random alphanumeric chat ID
-def generate_chat_id(length: int = 7) -> str:
-    characters = string.ascii_letters + string.digits
-    return ''.join(random.choices(characters, k=length))
-
 # Helper function to create chat completion data
 def create_chat_completion_data(
     content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
@@ -44,10 +43,13 @@ def create_chat_completion_data(
         "usage": None,
     }

-# Function to convert message to dictionary format
-def message_to_dict(message):
+# Function to convert message to dictionary format, ensuring base64 data and optional model prefix
+def message_to_dict(message, model_prefix: Optional[str] = None):
     content = message.content if isinstance(message.content, str) else message.content[0]["text"]
+    if model_prefix:
+        content = f"{model_prefix} {content}"
     if isinstance(message.content, list) and len(message.content) == 2 and "image_url" in message.content[1]:
+        # Ensure base64 images are always included for all models
         return {
             "role": message.role,
             "content": content,
@@ -59,11 +61,32 @@ def message_to_dict(message):
         }
     return {"role": message.role, "content": content}

+# Function to strip model prefix from content if present
+def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
+    """Remove the model prefix from the response content if present."""
+    if model_prefix and content.startswith(model_prefix):
+        logger.debug(f"Stripping prefix '{model_prefix}' from content.")
+        return content[len(model_prefix):].strip()
+    return content
+
+# Function to get the correct referer URL for logging
+def get_referer_url(chat_id: str, model: str) -> str:
+    """Generate the referer URL based on specific models listed in MODEL_MAPPING."""
+    if model in MODEL_MAPPING:
+        return f"{BASE_URL}/chat/{chat_id}?model={model}"
+    return BASE_URL
+
 # Process streaming response with headers from config.py
 async def process_streaming_response(request: ChatRequest):
-    chat_id = generate_chat_id()
+    chat_id = generate_id()
+    referer_url = get_referer_url(chat_id, request.model)
+    logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
+
     agent_mode = AGENT_MODE.get(request.model, {})
     trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
+    model_prefix = MODEL_PREFIXES.get(request.model, "")
+
+    headers_api_chat = get_headers_api_chat(referer_url)

     if request.model == 'o1-preview':
         delay_seconds = random.randint(1, 60)
@@ -81,7 +104,7 @@ async def process_streaming_response(request: ChatRequest):
         "isChromeExt": False,
         "isMicMode": False,
         "maxTokens": request.max_tokens,
-        "messages": [message_to_dict(msg) for msg in request.messages],
+        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
         "mobileClient": False,
         "playgroundTemperature": request.temperature,
         "playgroundTopP": request.top_p,
@@ -98,8 +121,8 @@ async def process_streaming_response(request: ChatRequest):
         try:
             async with client.stream(
                 "POST",
-                f"{BASE_URL}/api/chat",
-                headers=common_headers,
+                API_ENDPOINT,
+                headers=headers_api_chat,
                 json=json_data,
                 timeout=100,
             ) as response:
@@ -108,7 +131,11 @@ async def process_streaming_response(request: ChatRequest):
                     timestamp = int(datetime.now().timestamp())
                     if line:
                         content = line
-                        yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
+                        if content.startswith("$@$v=undefined-rv1$@$"):
+                            content = content[21:]
+                        cleaned_content = strip_model_prefix(content, model_prefix)
+                        yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
+
                 yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
                 yield "data: [DONE]\n\n"
         except httpx.HTTPStatusError as e:
@@ -120,9 +147,15 @@ async def process_streaming_response(request: ChatRequest):

 # Process non-streaming response with headers from config.py
 async def process_non_streaming_response(request: ChatRequest):
-    chat_id = generate_chat_id()
+    chat_id = generate_id()
+    referer_url = get_referer_url(chat_id, request.model)
+    logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
+
     agent_mode = AGENT_MODE.get(request.model, {})
     trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
+    model_prefix = MODEL_PREFIXES.get(request.model, "")
+
+    headers_api_chat = get_headers_api_chat(referer_url)

     if request.model == 'o1-preview':
         delay_seconds = random.randint(20, 60)
@@ -140,7 +173,7 @@ async def process_non_streaming_response(request: ChatRequest):
         "isChromeExt": False,
         "isMicMode": False,
         "maxTokens": request.max_tokens,
-        "messages": [message_to_dict(msg) for msg in request.messages],
+        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
         "mobileClient": False,
         "playgroundTemperature": request.temperature,
         "playgroundTopP": request.top_p,
@@ -157,7 +190,10 @@ async def process_non_streaming_response(request: ChatRequest):
     async with httpx.AsyncClient() as client:
         try:
             async with client.stream(
-                method="POST", url=f"{BASE_URL}/api/chat", headers=common_headers, json=json_data
+                method="POST",
+                url=API_ENDPOINT,
+                headers=headers_api_chat,
+                json=json_data
             ) as response:
                 response.raise_for_status()
                 async for chunk in response.aiter_text():
@@ -168,6 +204,11 @@ async def process_non_streaming_response(request: ChatRequest):
         except httpx.RequestError as e:
             logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
             raise HTTPException(status_code=500, detail=str(e))
+
+    if full_response.startswith("$@$v=undefined-rv1$@$"):
+        full_response = full_response[21:]
+
+    cleaned_full_response = strip_model_prefix(full_response, model_prefix)

     return {
         "id": f"chatcmpl-{uuid.uuid4()}",
@@ -177,7 +218,7 @@ async def process_non_streaming_response(request: ChatRequest):
         "choices": [
             {
                 "index": 0,
-                "message": {"role": "assistant", "content": full_response},
+                "message": {"role": "assistant", "content": cleaned_full_response},
                 "finish_reason": "stop",
             }
         ],
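
For context, here is a minimal standalone sketch of the cleanup the updated handlers apply to upstream output. The helper body is copied from the diff above; the sample raw_line and model_prefix values are hypothetical and only illustrate the two-step transformation (sentinel removal, then prefix stripping) performed before a chunk or the final response is emitted.

from typing import Optional

# Standalone copy of strip_model_prefix from the diff above (illustration only).
def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
    """Remove the model prefix from the response content if present."""
    if model_prefix and content.startswith(model_prefix):
        return content[len(model_prefix):].strip()
    return content

# Hypothetical raw line as the upstream stream might deliver it:
# a 21-character version sentinel, then the model prefix, then the text.
raw_line = "$@$v=undefined-rv1$@$GPT-4o: Hello there!"
model_prefix = "GPT-4o:"  # assumed example value; real values come from MODEL_PREFIXES

# Same two-step cleanup process_streaming_response performs per line.
if raw_line.startswith("$@$v=undefined-rv1$@$"):
    raw_line = raw_line[21:]  # drop the version sentinel
cleaned = strip_model_prefix(raw_line, model_prefix)

print(cleaned)  # -> "Hello there!"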