Niansuh committed on
Commit
e19211a
·
verified ·
1 Parent(s): 4311a71

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +227 -227
api/utils.py CHANGED
@@ -1,227 +1,227 @@
1
- from datetime import datetime
2
- import json
3
- import uuid
4
- import asyncio
5
- import random
6
- import string
7
- from typing import Any, Dict, Optional
8
-
9
- import httpx
10
- from fastapi import HTTPException
11
- from api.config import (
12
- MODEL_MAPPING,
13
- get_headers_api_chat,
14
- get_headers_chat,
15
- BASE_URL,
16
- AGENT_MODE,
17
- TRENDING_AGENT_MODE,
18
- MODEL_PREFIXES,
19
- MODEL_REFERERS
20
- )
21
- from api.models import ChatRequest
22
- from api.logger import setup_logger
23
-
24
- logger = setup_logger(__name__)
25
-
26
- # Helper function to create a random alphanumeric chat ID
27
- def generate_chat_id(length: int = 7) -> str:
28
- characters = string.ascii_letters + string.digits
29
- return ''.join(random.choices(characters, k=length))
30
-
31
- # Helper function to create chat completion data
32
- def create_chat_completion_data(
33
- content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
34
- ) -> Dict[str, Any]:
35
- return {
36
- "id": f"chatcmpl-{uuid.uuid4()}",
37
- "object": "chat.completion.chunk",
38
- "created": timestamp,
39
- "model": model,
40
- "choices": [
41
- {
42
- "index": 0,
43
- "delta": {"content": content, "role": "assistant"},
44
- "finish_reason": finish_reason,
45
- }
46
- ],
47
- "usage": None,
48
- }
49
-
50
- # Function to convert message to dictionary format, ensuring base64 data and optional model prefix
51
- def message_to_dict(message, model_prefix: Optional[str] = None):
52
- content = message.content if isinstance(message.content, str) else message.content[0]["text"]
53
- if model_prefix:
54
- content = f"{model_prefix} {content}"
55
- if isinstance(message.content, list) and len(message.content) == 2 and "image_url" in message.content[1]:
56
- # Ensure base64 images are always included for all models
57
- return {
58
- "role": message.role,
59
- "content": content,
60
- "data": {
61
- "imageBase64": message.content[1]["image_url"]["url"],
62
- "fileText": "",
63
- "title": "snapshot",
64
- },
65
- }
66
- return {"role": message.role, "content": content}
67
-
68
- # Function to strip model prefix from content if present
69
- def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
70
- """Remove the model prefix from the response content if present."""
71
- if model_prefix and content.startswith(model_prefix):
72
- logger.debug(f"Stripping prefix '{model_prefix}' from content.")
73
- return content[len(model_prefix):].strip()
74
- return content
75
-
76
- # Function to get the correct referer URL for logging
77
- def get_referer_url(chat_id: str, model: str) -> str:
78
- """Generate the referer URL based on specific models listed in MODEL_REFERERS."""
79
- if model in MODEL_REFERERS:
80
- return f"{BASE_URL}/chat/{chat_id}?model={model}"
81
- return BASE_URL
82
-
83
- # Process streaming response with headers from config.py
84
- async def process_streaming_response(request: ChatRequest):
85
- chat_id = generate_chat_id()
86
- referer_url = get_referer_url(chat_id, request.model)
87
- logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
88
-
89
- agent_mode = AGENT_MODE.get(request.model, {})
90
- trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
91
- model_prefix = MODEL_PREFIXES.get(request.model, "")
92
-
93
- headers_api_chat = get_headers_api_chat(referer_url)
94
-
95
- if request.model == 'o1-preview':
96
- delay_seconds = random.randint(1, 60)
97
- logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
98
- await asyncio.sleep(delay_seconds)
99
-
100
- json_data = {
101
- "agentMode": agent_mode,
102
- "clickedAnswer2": False,
103
- "clickedAnswer3": False,
104
- "clickedForceWebSearch": False,
105
- "codeModelMode": True,
106
- "githubToken": None,
107
- "id": chat_id,
108
- "isChromeExt": False,
109
- "isMicMode": False,
110
- "maxTokens": request.max_tokens,
111
- "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
112
- "mobileClient": False,
113
- "playgroundTemperature": request.temperature,
114
- "playgroundTopP": request.top_p,
115
- "previewToken": None,
116
- "trendingAgentMode": trending_agent_mode,
117
- "userId": None,
118
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
119
- "userSystemPrompt": None,
120
- "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc",
121
- "visitFromDelta": False,
122
- }
123
-
124
- async with httpx.AsyncClient() as client:
125
- try:
126
- async with client.stream(
127
- "POST",
128
- f"{BASE_URL}/api/chat",
129
- headers=headers_api_chat,
130
- json=json_data,
131
- timeout=100,
132
- ) as response:
133
- response.raise_for_status()
134
- async for line in response.aiter_lines():
135
- timestamp = int(datetime.now().timestamp())
136
- if line:
137
- content = line
138
- if content.startswith("$@$v=undefined-rv1$@$"):
139
- content = content[21:]
140
- cleaned_content = strip_model_prefix(content, model_prefix)
141
- yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
142
-
143
- yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
144
- yield "data: [DONE]\n\n"
145
- except httpx.HTTPStatusError as e:
146
- logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
147
- raise HTTPException(status_code=e.response.status_code, detail=str(e))
148
- except httpx.RequestError as e:
149
- logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
150
- raise HTTPException(status_code=500, detail=str(e))
151
-
152
- # Process non-streaming response with headers from config.py
153
- async def process_non_streaming_response(request: ChatRequest):
154
- chat_id = generate_chat_id()
155
- referer_url = get_referer_url(chat_id, request.model)
156
- logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
157
-
158
- agent_mode = AGENT_MODE.get(request.model, {})
159
- trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
160
- model_prefix = MODEL_PREFIXES.get(request.model, "")
161
-
162
- headers_api_chat = get_headers_api_chat(referer_url)
163
- headers_chat = get_headers_chat(referer_url, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""]))
164
-
165
- if request.model == 'o1-preview':
166
- delay_seconds = random.randint(20, 60)
167
- logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
168
- await asyncio.sleep(delay_seconds)
169
-
170
- json_data = {
171
- "agentMode": agent_mode,
172
- "clickedAnswer2": False,
173
- "clickedAnswer3": False,
174
- "clickedForceWebSearch": False,
175
- "codeModelMode": True,
176
- "githubToken": None,
177
- "id": chat_id,
178
- "isChromeExt": False,
179
- "isMicMode": False,
180
- "maxTokens": request.max_tokens,
181
- "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
182
- "mobileClient": False,
183
- "playgroundTemperature": request.temperature,
184
- "playgroundTopP": request.top_p,
185
- "previewToken": None,
186
- "trendingAgentMode": trending_agent_mode,
187
- "userId": None,
188
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
189
- "userSystemPrompt": None,
190
- "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc",
191
- "visitFromDelta": False,
192
- }
193
-
194
- full_response = ""
195
- async with httpx.AsyncClient() as client:
196
- try:
197
- async with client.stream(
198
- method="POST", url=f"{BASE_URL}/api/chat", headers=headers_api_chat, json=json_data
199
- ) as response:
200
- response.raise_for_status()
201
- async for chunk in response.aiter_text():
202
- full_response += chunk
203
- except httpx.HTTPStatusError as e:
204
- logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
205
- raise HTTPException(status_code=e.response.status_code, detail=str(e))
206
- except httpx.RequestError as e:
207
- logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
208
- raise HTTPException(status_code=500, detail=str(e))
209
- if full_response.startswith("$@$v=undefined-rv1$@$"):
210
- full_response = full_response[21:]
211
-
212
- cleaned_full_response = strip_model_prefix(full_response, model_prefix)
213
-
214
- return {
215
- "id": f"chatcmpl-{uuid.uuid4()}",
216
- "object": "chat.completion",
217
- "created": int(datetime.now().timestamp()),
218
- "model": request.model,
219
- "choices": [
220
- {
221
- "index": 0,
222
- "message": {"role": "assistant", "content": cleaned_full_response},
223
- "finish_reason": "stop",
224
- }
225
- ],
226
- "usage": None,
227
- }
 
1
+ from datetime import datetime
2
+ import json
3
+ import uuid
4
+ import asyncio
5
+ import random
6
+ import string
7
+ from typing import Any, Dict, Optional
8
+
9
+ import httpx
10
+ from fastapi import HTTPException
11
+ from api.config import (
12
+ MODEL_MAPPING,
13
+ get_headers_api_chat,
14
+ get_headers_chat,
15
+ BASE_URL,
16
+ AGENT_MODE,
17
+ TRENDING_AGENT_MODE,
18
+ MODEL_PREFIXES,
19
+ MODEL_REFERERS
20
+ )
21
+ from api.models import ChatRequest
22
+ from api.logger import setup_logger
23
+
24
+ logger = setup_logger(__name__)
25
+
26
# Helper function to create a random alphanumeric chat ID
def generate_chat_id(length: int = 7) -> str:
    """Return a random chat ID made of *length* ASCII letters and digits."""
    alphabet = string.ascii_letters + string.digits
    return "".join(random.choice(alphabet) for _ in range(length))
30
+
31
# Helper function to create chat completion data
def create_chat_completion_data(
    content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
) -> Dict[str, Any]:
    """Build one OpenAI-style ``chat.completion.chunk`` payload.

    A fresh ``chatcmpl-<uuid4>`` id is generated per call; ``usage`` is
    always ``None`` for streamed chunks.
    """
    choice = {
        "index": 0,
        "delta": {"content": content, "role": "assistant"},
        "finish_reason": finish_reason,
    }
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": timestamp,
        "model": model,
        "choices": [choice],
        "usage": None,
    }
49
+
50
# Function to convert message to dictionary format, ensuring base64 data and optional model prefix
def message_to_dict(message, model_prefix: Optional[str] = None):
    """Convert a chat message to the upstream wire format.

    Text is taken from ``message.content`` directly, or from the first list
    element's ``"text"`` field for multi-part content. When a two-part content
    list carries an ``image_url`` part, its URL (assumed to be base64 data) is
    attached under ``"data"``. ``model_prefix``, if given, is prepended to the
    text.
    """
    raw = message.content
    text = raw if isinstance(raw, str) else raw[0]["text"]
    if model_prefix:
        text = f"{model_prefix} {text}"
    has_image = isinstance(raw, list) and len(raw) == 2 and "image_url" in raw[1]
    if not has_image:
        return {"role": message.role, "content": text}
    # Ensure base64 images are always included for all models
    return {
        "role": message.role,
        "content": text,
        "data": {
            "imageBase64": raw[1]["image_url"]["url"],
            "fileText": "",
            "title": "snapshot",
        },
    }
67
+
68
# Function to strip model prefix from content if present
def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
    """Remove the model prefix from the response content if present."""
    if not model_prefix or not content.startswith(model_prefix):
        return content
    logger.debug(f"Stripping prefix '{model_prefix}' from content.")
    return content[len(model_prefix):].strip()
75
+
76
# Function to get the correct referer URL for logging
def get_referer_url(chat_id: str, model: str) -> str:
    """Generate the referer URL based on specific models listed in MODEL_REFERERS."""
    if model not in MODEL_REFERERS:
        return BASE_URL
    return f"{BASE_URL}/chat/{chat_id}?model={model}"
82
+
83
# Process streaming response with headers from config.py
async def process_streaming_response(request: ChatRequest):
    """Stream the upstream chat answer as OpenAI-style Server-Sent Events.

    Yields one ``chat.completion.chunk`` SSE line per upstream line, then a
    final ``finish_reason="stop"`` chunk and a ``data: [DONE]`` marker.

    Raises:
        HTTPException: with the upstream status code on HTTP errors, or 500
            on request/transport failures.
    """
    chat_id = generate_chat_id()
    referer_url = get_referer_url(chat_id, request.model)
    logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")

    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")

    headers_api_chat = get_headers_api_chat(referer_url)

    if request.model == 'o1-preview':
        # Deliberate random delay for this model — presumably to mimic its
        # upstream latency; TODO confirm intent.
        delay_seconds = random.randint(1, 60)
        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
        await asyncio.sleep(delay_seconds)

    json_data = {
        "agentMode": agent_mode,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "codeModelMode": True,
        "githubToken": None,
        # FIX: was the bare undefined name `fWr6d8p`, which raised NameError
        # on every request; the payload expects the generated chat ID here.
        "id": chat_id,
        "isChromeExt": False,
        "isMicMode": False,
        "maxTokens": request.max_tokens,
        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
        "mobileClient": False,
        "playgroundTemperature": request.temperature,
        "playgroundTopP": request.top_p,
        "previewToken": None,
        "trendingAgentMode": trending_agent_mode,
        "userId": None,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
        "userSystemPrompt": None,
        "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc",
        "visitFromDelta": False,
    }

    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                "POST",
                f"{BASE_URL}/api/chat",
                headers=headers_api_chat,
                json=json_data,
                timeout=100,
            ) as response:
                response.raise_for_status()
                async for line in response.aiter_lines():
                    timestamp = int(datetime.now().timestamp())
                    if line:
                        content = line
                        # Upstream prepends this marker (21 chars); drop it.
                        if content.startswith("$@$v=undefined-rv1$@$"):
                            content = content[21:]
                        cleaned_content = strip_model_prefix(content, model_prefix)
                        yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"

                # FIX: compute the timestamp here instead of reusing the loop
                # variable, which was unbound when the stream yielded no lines.
                final_timestamp = int(datetime.now().timestamp())
                yield f"data: {json.dumps(create_chat_completion_data('', request.model, final_timestamp, 'stop'))}\n\n"
                yield "data: [DONE]\n\n"
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
            raise HTTPException(status_code=500, detail=str(e))
151
+
152
# Process non-streaming response with headers from config.py
async def process_non_streaming_response(request: ChatRequest):
    """Return a complete OpenAI-style ``chat.completion`` dict for *request*.

    Collects the whole upstream stream into one string, strips the upstream
    marker and any model prefix, and wraps the text in a single-choice
    ``chat.completion`` response.

    Raises:
        HTTPException: with the upstream status code on HTTP errors, or 500
            on request/transport failures.
    """
    chat_id = generate_chat_id()
    referer_url = get_referer_url(chat_id, request.model)
    logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")

    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")

    headers_api_chat = get_headers_api_chat(referer_url)
    # NOTE(review): headers_chat is built but never used below — confirm
    # whether a follow-up request was intended, or remove this call.
    headers_chat = get_headers_chat(referer_url, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""]))

    if request.model == 'o1-preview':
        # Deliberate random delay for this model — presumably to mimic its
        # upstream latency; TODO confirm intent.
        delay_seconds = random.randint(20, 60)
        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
        await asyncio.sleep(delay_seconds)

    json_data = {
        "agentMode": agent_mode,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "codeModelMode": True,
        "githubToken": None,
        # FIX: was the bare undefined name `fWr6d8p`, which raised NameError
        # on every request; the payload expects the generated chat ID here.
        "id": chat_id,
        "isChromeExt": False,
        "isMicMode": False,
        "maxTokens": request.max_tokens,
        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
        "mobileClient": False,
        "playgroundTemperature": request.temperature,
        "playgroundTopP": request.top_p,
        "previewToken": None,
        "trendingAgentMode": trending_agent_mode,
        "userId": None,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
        "userSystemPrompt": None,
        "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc",
        "visitFromDelta": False,
    }

    full_response = ""
    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                method="POST",
                url=f"{BASE_URL}/api/chat",
                headers=headers_api_chat,
                json=json_data,
                # Match the streaming path's explicit timeout; httpx's short
                # default is too tight for slow model responses.
                timeout=100,
            ) as response:
                response.raise_for_status()
                async for chunk in response.aiter_text():
                    full_response += chunk
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
            raise HTTPException(status_code=500, detail=str(e))

    # Upstream prepends this marker (21 chars); drop it before prefix-stripping.
    if full_response.startswith("$@$v=undefined-rv1$@$"):
        full_response = full_response[21:]

    cleaned_full_response = strip_model_prefix(full_response, model_prefix)

    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion",
        "created": int(datetime.now().timestamp()),
        "model": request.model,
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": cleaned_full_response},
                "finish_reason": "stop",
            }
        ],
        "usage": None,
    }