Niansuh committed on
Commit
10c6421
·
verified ·
1 Parent(s): 552bcd7

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +214 -200
api/utils.py CHANGED
@@ -1,200 +1,214 @@
1
- from datetime import datetime
2
- import json
3
- from typing import Any, Dict, Optional
4
-
5
- import httpx
6
- from api.config import (
7
- MODEL_MAPPING,
8
- headers,
9
- AGENT_MODE,
10
- TRENDING_AGENT_MODE,
11
- BASE_URL,
12
- MODEL_PREFIXES,
13
- MODEL_REFERERS
14
- )
15
- from fastapi import HTTPException
16
- from api.models import ChatRequest
17
-
18
- from api.logger import setup_logger
19
-
20
- import uuid
21
-
22
- logger = setup_logger(__name__)
23
-
24
- def create_chat_completion_data(
25
- content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
26
- ) -> Dict[str, Any]:
27
- return {
28
- "id": f"chatcmpl-{uuid.uuid4()}",
29
- "object": "chat.completion.chunk",
30
- "created": timestamp,
31
- "model": model,
32
- "choices": [
33
- {
34
- "index": 0,
35
- "delta": {"content": content, "role": "assistant"},
36
- "finish_reason": finish_reason,
37
- }
38
- ],
39
- "usage": None,
40
- }
41
-
42
- def message_to_dict(message, model_prefix: Optional[str] = None):
43
- if isinstance(message.content, str):
44
- content = message.content
45
- if model_prefix:
46
- content = f"{model_prefix} {content}"
47
- return {"role": message.role, "content": content}
48
- elif isinstance(message.content, list) and len(message.content) == 2:
49
- content = message.content[0]["text"]
50
- if model_prefix:
51
- content = f"{model_prefix} {content}"
52
- return {
53
- "role": message.role,
54
- "content": content,
55
- "data": {
56
- "imageBase64": message.content[1]["image_url"]["url"],
57
- "fileText": "",
58
- "title": "snapshot",
59
- },
60
- }
61
- else:
62
- return {"role": message.role, "content": message.content}
63
-
64
- def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
65
- """Remove the model prefix from the response content if present."""
66
- if model_prefix and content.startswith(model_prefix):
67
- logger.debug(f"Stripping prefix '{model_prefix}' from content.")
68
- return content[len(model_prefix):].strip()
69
- logger.debug("No prefix to strip from content.")
70
- return content
71
-
72
- async def process_streaming_response(request: ChatRequest):
73
- agent_mode = AGENT_MODE.get(request.model, {})
74
- trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
75
- model_prefix = MODEL_PREFIXES.get(request.model, "")
76
- referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
77
- referer_url = f"{BASE_URL}{referer_path}"
78
-
79
- # Update headers with dynamic Referer
80
- dynamic_headers = headers.copy()
81
- dynamic_headers['Referer'] = referer_url
82
-
83
- json_data = {
84
- "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
85
- "previewToken": None,
86
- "userId": None,
87
- "codeModelMode": True,
88
- "agentMode": agent_mode,
89
- "trendingAgentMode": trending_agent_mode,
90
- "isMicMode": False,
91
- "userSystemPrompt": None,
92
- "maxTokens": request.max_tokens,
93
- "playgroundTopP": request.top_p,
94
- "playgroundTemperature": request.temperature,
95
- "isChromeExt": False,
96
- "githubToken": None,
97
- "clickedAnswer2": False,
98
- "clickedAnswer3": False,
99
- "clickedForceWebSearch": False,
100
- "visitFromDelta": False,
101
- "mobileClient": False,
102
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
103
- }
104
-
105
- async with httpx.AsyncClient() as client:
106
- try:
107
- async with client.stream(
108
- "POST",
109
- f"{BASE_URL}/api/chat",
110
- headers=dynamic_headers,
111
- json=json_data,
112
- timeout=100,
113
- ) as response:
114
- response.raise_for_status()
115
- async for line in response.aiter_lines():
116
- timestamp = int(datetime.now().timestamp())
117
- if line:
118
- content = line
119
- if content.startswith("$@$v=undefined-rv1$@$"):
120
- content = content[21:]
121
- # Strip the model prefix from the response content
122
- cleaned_content = strip_model_prefix(content, model_prefix)
123
- yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
124
-
125
- yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
126
- yield "data: [DONE]\n\n"
127
- except httpx.HTTPStatusError as e:
128
- logger.error(f"HTTP error occurred: {e}")
129
- raise HTTPException(status_code=e.response.status_code, detail=str(e))
130
- except httpx.RequestError as e:
131
- logger.error(f"Error occurred during request: {e}")
132
- raise HTTPException(status_code=500, detail=str(e))
133
-
134
- async def process_non_streaming_response(request: ChatRequest):
135
- agent_mode = AGENT_MODE.get(request.model, {})
136
- trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
137
- model_prefix = MODEL_PREFIXES.get(request.model, "")
138
- referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
139
- referer_url = f"{BASE_URL}{referer_path}"
140
-
141
- # Update headers with dynamic Referer
142
- dynamic_headers = headers.copy()
143
- dynamic_headers['Referer'] = referer_url
144
-
145
- json_data = {
146
- "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
147
- "previewToken": None,
148
- "userId": None,
149
- "codeModelMode": True,
150
- "agentMode": agent_mode,
151
- "trendingAgentMode": trending_agent_mode,
152
- "isMicMode": False,
153
- "userSystemPrompt": None,
154
- "maxTokens": request.max_tokens,
155
- "playgroundTopP": request.top_p,
156
- "playgroundTemperature": request.temperature,
157
- "isChromeExt": False,
158
- "githubToken": None,
159
- "clickedAnswer2": False,
160
- "clickedAnswer3": False,
161
- "clickedForceWebSearch": False,
162
- "visitFromDelta": False,
163
- "mobileClient": False,
164
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
165
- }
166
- full_response = ""
167
- async with httpx.AsyncClient() as client:
168
- try:
169
- async with client.stream(
170
- method="POST", url=f"{BASE_URL}/api/chat", headers=dynamic_headers, json=json_data
171
- ) as response:
172
- response.raise_for_status()
173
- async for chunk in response.aiter_text():
174
- full_response += chunk
175
- except httpx.HTTPStatusError as e:
176
- logger.error(f"HTTP error occurred: {e}")
177
- raise HTTPException(status_code=e.response.status_code, detail=str(e))
178
- except httpx.RequestError as e:
179
- logger.error(f"Error occurred during request: {e}")
180
- raise HTTPException(status_code=500, detail=str(e))
181
- if full_response.startswith("$@$v=undefined-rv1$@$"):
182
- full_response = full_response[21:]
183
-
184
- # Strip the model prefix from the full response
185
- cleaned_full_response = strip_model_prefix(full_response, model_prefix)
186
-
187
- return {
188
- "id": f"chatcmpl-{uuid.uuid4()}",
189
- "object": "chat.completion",
190
- "created": int(datetime.now().timestamp()),
191
- "model": request.model,
192
- "choices": [
193
- {
194
- "index": 0,
195
- "message": {"role": "assistant", "content": cleaned_full_response},
196
- "finish_reason": "stop",
197
- }
198
- ],
199
- "usage": None,
200
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import datetime
2
+ import json
3
+ from typing import Any, Dict, Optional
4
+
5
+ import httpx
6
+ from api.config import (
7
+ MODEL_MAPPING,
8
+ headers,
9
+ AGENT_MODE,
10
+ TRENDING_AGENT_MODE,
11
+ BASE_URL,
12
+ MODEL_PREFIXES,
13
+ MODEL_REFERERS
14
+ )
15
+ from fastapi import HTTPException
16
+ from api.models import ChatRequest
17
+
18
+ from api.logger import setup_logger
19
+
20
+ import uuid
21
+ import asyncio
22
+ import random # Newly added imports
23
+
24
+ logger = setup_logger(__name__)
25
+
26
def create_chat_completion_data(
    content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
) -> Dict[str, Any]:
    """Build one OpenAI-style streaming chunk (``chat.completion.chunk``).

    Args:
        content: Text delta carried by this chunk.
        model: Model name echoed back to the client.
        timestamp: Unix timestamp (seconds) used for the ``created`` field.
        finish_reason: ``"stop"`` on the terminal chunk, otherwise ``None``.

    Returns:
        A dict matching the OpenAI streaming response schema; ``usage`` is
        always ``None`` on chunks.
    """
    choice = {
        "index": 0,
        "delta": {"content": content, "role": "assistant"},
        "finish_reason": finish_reason,
    }
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": timestamp,
        "model": model,
        "choices": [choice],
        "usage": None,
    }
43
+
44
def message_to_dict(message, model_prefix: Optional[str] = None):
    """Convert a chat message into the upstream API's message dict format.

    String content is forwarded as-is (optionally prefixed with
    *model_prefix*). A two-element list is treated as ``[text_part,
    image_part]`` and produces an entry carrying an ``imageBase64``
    payload. Any other content shape is passed through untouched.
    """
    raw = message.content
    if isinstance(raw, str):
        text = f"{model_prefix} {raw}" if model_prefix else raw
        return {"role": message.role, "content": text}
    if isinstance(raw, list) and len(raw) == 2:
        # Upstream expects [text part, image part] in exactly this order.
        text = raw[0]["text"]
        if model_prefix:
            text = f"{model_prefix} {text}"
        return {
            "role": message.role,
            "content": text,
            "data": {
                "imageBase64": raw[1]["image_url"]["url"],
                "fileText": "",
                "title": "snapshot",
            },
        }
    return {"role": message.role, "content": raw}
65
+
66
def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
    """Drop a leading model prefix (and surrounding whitespace) from *content*.

    Returns *content* unchanged when no prefix is configured or the string
    does not start with it.
    """
    if not (model_prefix and content.startswith(model_prefix)):
        logger.debug("No prefix to strip from content.")
        return content
    logger.debug(f"Stripping prefix '{model_prefix}' from content.")
    return content[len(model_prefix):].strip()
73
+
74
async def process_streaming_response(request: ChatRequest):
    """Stream a chat completion from the upstream API as SSE ``data:`` events.

    Yields OpenAI-style ``chat.completion.chunk`` JSON events for each
    upstream line, then a final ``stop`` chunk and a ``[DONE]`` marker.

    Raises:
        HTTPException: with the upstream status on HTTP errors, or 500 on
            transport failures.
    """
    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")
    referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
    referer_url = f"{BASE_URL}{referer_path}"

    # Update headers with a dynamic Referer so the upstream sees the model page.
    dynamic_headers = headers.copy()
    dynamic_headers['Referer'] = referer_url

    # Artificial delay for 'o1-preview' (presumably to mimic its latency —
    # NOTE(review): confirm this throttle is still wanted).
    if request.model == 'o1-preview':
        delay_seconds = random.randint(20, 60)
        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview'")
        await asyncio.sleep(delay_seconds)

    json_data = {
        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
        "previewToken": None,
        "userId": None,
        "codeModelMode": True,
        "agentMode": agent_mode,
        "trendingAgentMode": trending_agent_mode,
        "isMicMode": False,
        "userSystemPrompt": None,
        "maxTokens": request.max_tokens,
        "playgroundTopP": request.top_p,
        "playgroundTemperature": request.temperature,
        "isChromeExt": False,
        "githubToken": None,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "visitFromDelta": False,
        "mobileClient": False,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
    }

    # Sentinel the upstream prepends to some lines; strip by length instead
    # of a hard-coded character count.
    sentinel = "$@$v=undefined-rv1$@$"

    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                "POST",
                f"{BASE_URL}/api/chat",
                headers=dynamic_headers,
                json=json_data,
                timeout=100,
            ) as response:
                response.raise_for_status()
                # Pre-seed timestamp so the final 'stop' chunk is still valid
                # when the upstream yields no lines at all (previously this
                # raised NameError on an empty stream).
                timestamp = int(datetime.now().timestamp())
                async for line in response.aiter_lines():
                    timestamp = int(datetime.now().timestamp())
                    if line:
                        content = line
                        if content.startswith(sentinel):
                            content = content[len(sentinel):]
                        # Strip the configured model prefix from the content.
                        cleaned_content = strip_model_prefix(content, model_prefix)
                        yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"

                yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
                yield "data: [DONE]\n\n"
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request: {e}")
            raise HTTPException(status_code=500, detail=str(e))
141
+
142
async def process_non_streaming_response(request: ChatRequest):
    """Fetch a complete (non-streaming) chat completion from the upstream API.

    Buffers the entire upstream stream, removes the upstream sentinel and any
    configured model prefix, and returns an OpenAI-style ``chat.completion``
    dict.

    Raises:
        HTTPException: with the upstream status on HTTP errors, or 500 on
            transport failures.
    """
    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")
    referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
    referer_url = f"{BASE_URL}{referer_path}"

    # Update headers with a dynamic Referer so the upstream sees the model page.
    dynamic_headers = headers.copy()
    dynamic_headers['Referer'] = referer_url

    # Artificial delay for 'o1-preview' (presumably to mimic its latency —
    # NOTE(review): confirm this throttle is still wanted).
    if request.model == 'o1-preview':
        delay_seconds = random.randint(20, 60)
        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview'")
        await asyncio.sleep(delay_seconds)

    json_data = {
        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
        "previewToken": None,
        "userId": None,
        "codeModelMode": True,
        "agentMode": agent_mode,
        "trendingAgentMode": trending_agent_mode,
        "isMicMode": False,
        "userSystemPrompt": None,
        "maxTokens": request.max_tokens,
        "playgroundTopP": request.top_p,
        "playgroundTemperature": request.temperature,
        "isChromeExt": False,
        "githubToken": None,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "visitFromDelta": False,
        "mobileClient": False,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
    }
    full_response = ""
    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                method="POST",
                url=f"{BASE_URL}/api/chat",
                headers=dynamic_headers,
                json=json_data,
                # Match the streaming path's timeout; the httpx default (5s)
                # is too short for slow generations and made this path fail
                # where process_streaming_response succeeded.
                timeout=100,
            ) as response:
                response.raise_for_status()
                async for chunk in response.aiter_text():
                    full_response += chunk
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request: {e}")
            raise HTTPException(status_code=500, detail=str(e))

    # Sentinel the upstream prepends to some responses; strip by length
    # instead of a hard-coded character count.
    sentinel = "$@$v=undefined-rv1$@$"
    if full_response.startswith(sentinel):
        full_response = full_response[len(sentinel):]

    # Strip the configured model prefix from the full response.
    cleaned_full_response = strip_model_prefix(full_response, model_prefix)

    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion",
        "created": int(datetime.now().timestamp()),
        "model": request.model,
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": cleaned_full_response},
                "finish_reason": "stop",
            }
        ],
        "usage": None,
    }