Niansuh committed on
Commit
1f7ef29
·
verified ·
1 Parent(s): d96bc9d

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +225 -216
api/utils.py CHANGED
@@ -1,216 +1,225 @@
1
- from datetime import datetime
2
- import json
3
- from typing import Any, Dict, Optional
4
-
5
- import httpx
6
- from api.config import (
7
- MODEL_MAPPING,
8
- headers,
9
- AGENT_MODE,
10
- TRENDING_AGENT_MODE,
11
- BASE_URL,
12
- MODEL_PREFIXES,
13
- MODEL_REFERERS
14
- )
15
- from fastapi import HTTPException
16
- from api.models import ChatRequest
17
-
18
- from api.logger import setup_logger
19
-
20
- import uuid
21
- import asyncio
22
- import random # Newly added imports
23
-
24
- logger = setup_logger(__name__)
25
-
26
- def create_chat_completion_data(
27
- content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
28
- ) -> Dict[str, Any]:
29
- return {
30
- "id": f"chatcmpl-{uuid.uuid4()}",
31
- "object": "chat.completion.chunk",
32
- "created": timestamp,
33
- "model": model,
34
- "choices": [
35
- {
36
- "index": 0,
37
- "delta": {"content": content, "role": "assistant"},
38
- "finish_reason": finish_reason,
39
- }
40
- ],
41
- "usage": None,
42
- }
43
-
44
- def message_to_dict(message, model_prefix: Optional[str] = None):
45
- if isinstance(message.content, str):
46
- content = message.content
47
- if model_prefix:
48
- content = f"{model_prefix} {content}"
49
- return {"role": message.role, "content": content}
50
- elif isinstance(message.content, list) and len(message.content) == 2:
51
- content = message.content[0]["text"]
52
- if model_prefix:
53
- content = f"{model_prefix} {content}"
54
- return {
55
- "role": message.role,
56
- "content": content,
57
- "data": {
58
- "imageBase64": message.content[1]["image_url"]["url"],
59
- "fileText": "",
60
- "title": "snapshot",
61
- },
62
- }
63
- else:
64
- return {"role": message.role, "content": message.content}
65
-
66
- def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
67
- """Remove the model prefix from the response content if present."""
68
- if model_prefix and content.startswith(model_prefix):
69
- logger.debug(f"Stripping prefix '{model_prefix}' from content.")
70
- return content[len(model_prefix):].strip()
71
- logger.debug("No prefix to strip from content.")
72
- return content
73
-
74
- async def process_streaming_response(request: ChatRequest):
75
- agent_mode = AGENT_MODE.get(request.model, {})
76
- trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
77
- model_prefix = MODEL_PREFIXES.get(request.model, "")
78
- referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
79
- referer_url = f"{BASE_URL}{referer_path}"
80
-
81
- # Update headers with dynamic Referer
82
- dynamic_headers = headers.copy()
83
- dynamic_headers['Referer'] = referer_url
84
-
85
- # Introduce delay for 'o1-preview' model
86
- if request.model == 'o1-preview':
87
- delay_seconds = random.randint(1, 60)
88
- logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview'")
89
- await asyncio.sleep(delay_seconds)
90
-
91
- json_data = {
92
- "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
93
- "previewToken": None,
94
- "userId": None,
95
- "codeModelMode": True,
96
- "agentMode": agent_mode,
97
- "trendingAgentMode": trending_agent_mode,
98
- "isMicMode": False,
99
- "userSystemPrompt": None,
100
- "maxTokens": request.max_tokens,
101
- "playgroundTopP": request.top_p,
102
- "playgroundTemperature": request.temperature,
103
- "isChromeExt": False,
104
- "githubToken": None,
105
- "clickedAnswer2": False,
106
- "clickedAnswer3": False,
107
- "clickedForceWebSearch": False,
108
- "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc",
109
- "visitFromDelta": False,
110
- "mobileClient": False,
111
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
112
- }
113
-
114
- async with httpx.AsyncClient() as client:
115
- try:
116
- async with client.stream(
117
- "POST",
118
- f"{BASE_URL}/api/chat",
119
- headers=dynamic_headers,
120
- json=json_data,
121
- timeout=100,
122
- ) as response:
123
- response.raise_for_status()
124
- async for line in response.aiter_lines():
125
- timestamp = int(datetime.now().timestamp())
126
- if line:
127
- content = line
128
- if content.startswith("$@$v=undefined-rv1$@$"):
129
- content = content[21:]
130
- # Strip the model prefix from the response content
131
- cleaned_content = strip_model_prefix(content, model_prefix)
132
- yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
133
-
134
- yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
135
- yield "data: [DONE]\n\n"
136
- except httpx.HTTPStatusError as e:
137
- logger.error(f"HTTP error occurred: {e}")
138
- raise HTTPException(status_code=e.response.status_code, detail=str(e))
139
- except httpx.RequestError as e:
140
- logger.error(f"Error occurred during request: {e}")
141
- raise HTTPException(status_code=500, detail=str(e))
142
-
143
- async def process_non_streaming_response(request: ChatRequest):
144
- agent_mode = AGENT_MODE.get(request.model, {})
145
- trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
146
- model_prefix = MODEL_PREFIXES.get(request.model, "")
147
- referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
148
- referer_url = f"{BASE_URL}{referer_path}"
149
-
150
- # Update headers with dynamic Referer
151
- dynamic_headers = headers.copy()
152
- dynamic_headers['Referer'] = referer_url
153
-
154
- # Introduce delay for 'o1-preview' model
155
- if request.model == 'o1-preview':
156
- delay_seconds = random.randint(20, 60)
157
- logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview'")
158
- await asyncio.sleep(delay_seconds)
159
-
160
- json_data = {
161
- "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
162
- "previewToken": None,
163
- "userId": None,
164
- "codeModelMode": True,
165
- "agentMode": agent_mode,
166
- "trendingAgentMode": trending_agent_mode,
167
- "isMicMode": False,
168
- "userSystemPrompt": None,
169
- "maxTokens": request.max_tokens,
170
- "playgroundTopP": request.top_p,
171
- "playgroundTemperature": request.temperature,
172
- "isChromeExt": False,
173
- "githubToken": None,
174
- "clickedAnswer2": False,
175
- "clickedAnswer3": False,
176
- "clickedForceWebSearch": False,
177
- "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc",
178
- "visitFromDelta": False,
179
- "mobileClient": False,
180
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
181
- }
182
- full_response = ""
183
- async with httpx.AsyncClient() as client:
184
- try:
185
- async with client.stream(
186
- method="POST", url=f"{BASE_URL}/api/chat", headers=dynamic_headers, json=json_data
187
- ) as response:
188
- response.raise_for_status()
189
- async for chunk in response.aiter_text():
190
- full_response += chunk
191
- except httpx.HTTPStatusError as e:
192
- logger.error(f"HTTP error occurred: {e}")
193
- raise HTTPException(status_code=e.response.status_code, detail=str(e))
194
- except httpx.RequestError as e:
195
- logger.error(f"Error occurred during request: {e}")
196
- raise HTTPException(status_code=500, detail=str(e))
197
- if full_response.startswith("$@$v=undefined-rv1$@$"):
198
- full_response = full_response[21:]
199
-
200
- # Strip the model prefix from the full response
201
- cleaned_full_response = strip_model_prefix(full_response, model_prefix)
202
-
203
- return {
204
- "id": f"chatcmpl-{uuid.uuid4()}",
205
- "object": "chat.completion",
206
- "created": int(datetime.now().timestamp()),
207
- "model": request.model,
208
- "choices": [
209
- {
210
- "index": 0,
211
- "message": {"role": "assistant", "content": cleaned_full_response},
212
- "finish_reason": "stop",
213
- }
214
- ],
215
- "usage": None,
216
- }
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import datetime
2
+ import json
3
+ import uuid
4
+ import random
5
+ import asyncio
6
+ from typing import Any, Dict, Optional
7
+ import httpx
8
+ from fastapi import HTTPException
9
+ from api.config import (
10
+ MODEL_MAPPING,
11
+ headers,
12
+ AGENT_MODE,
13
+ TRENDING_AGENT_MODE,
14
+ BASE_URL,
15
+ MODEL_PREFIXES,
16
+ MODEL_REFERERS
17
+ )
18
+ from api.models import ChatRequest
19
+ from api.logger import setup_logger
20
+
21
+ # Initialize logger
22
+ logger = setup_logger(__name__)
23
+
24
# Produce a unique identifier for one chat session.
def generate_chat_id() -> str:
    """Return a fresh chat ID of the form ``chat-<uuid4>``."""
    return "chat-" + str(uuid.uuid4())
27
+
28
# Build a single OpenAI-style streaming chunk payload.
def create_chat_completion_data(
    content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
) -> Dict[str, Any]:
    """Return one ``chat.completion.chunk`` dict in the OpenAI streaming format.

    Args:
        content: Text delta carried by this chunk.
        model: Model identifier echoed back to the client.
        timestamp: Unix timestamp (seconds) for the ``created`` field.
        finish_reason: ``None`` for intermediate chunks, ``"stop"`` on the last.
    """
    choice = {
        "index": 0,
        "delta": {"content": content, "role": "assistant"},
        "finish_reason": finish_reason,
    }
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": timestamp,
        "model": model,
        "choices": [choice],
        "usage": None,
    }
46
+
47
# Convert an incoming chat message into the upstream API's message dict.
def message_to_dict(message, model_prefix: Optional[str] = None):
    """Serialize *message* for the upstream ``/api/chat`` payload.

    Plain-string content is passed through (optionally prefixed with
    *model_prefix*).  List content is treated as multimodal: the first
    ``text`` part becomes the message body and the first ``image_url``
    part is attached as base64 image data.  Anything else is forwarded
    unchanged.

    Args:
        message: Object with ``role`` and ``content`` attributes.
        model_prefix: Optional prefix prepended to text content.

    Returns:
        Dict with ``role``/``content`` keys, plus ``data`` for images.
    """
    content = message.content
    if isinstance(content, str):
        if model_prefix:
            content = f"{model_prefix} {content}"
        return {"role": message.role, "content": content}
    if isinstance(content, list):
        # The original assumed exactly [text, image] in that order; search
        # the parts instead so ordering or extra parts cannot raise
        # KeyError/IndexError on otherwise-valid multimodal messages.
        text_part = next(
            (p for p in content if isinstance(p, dict) and "text" in p), None
        )
        image_part = next(
            (p for p in content if isinstance(p, dict) and "image_url" in p), None
        )
        if text_part is not None and image_part is not None:
            text = text_part["text"]
            if model_prefix:
                text = f"{model_prefix} {text}"
            return {
                "role": message.role,
                "content": text,
                "data": {
                    "imageBase64": image_part["image_url"]["url"],
                    "fileText": "",
                    "title": "snapshot",
                },
            }
    # Fallback: forward unrecognized content shapes untouched.
    return {"role": message.role, "content": message.content}
69
+
70
# Strip a configured model prefix off upstream response text.
def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
    """Remove the model prefix from the response content if present."""
    has_prefix = bool(model_prefix) and content.startswith(model_prefix)
    if not has_prefix:
        logger.debug("No prefix to strip from content.")
        return content
    logger.debug(f"Stripping prefix '{model_prefix}' from content.")
    return content[len(model_prefix):].strip()
78
+
79
# Process streaming response
async def process_streaming_response(request: ChatRequest):
    """Proxy *request* to the upstream ``/api/chat`` endpoint and yield
    OpenAI-style Server-Sent-Events chunks.

    Yields:
        ``data: <json>`` lines for each upstream content line, then a final
        "stop" chunk and ``data: [DONE]``.

    Raises:
        HTTPException: with the mirrored upstream status on HTTP errors,
            or 500 on transport errors.
    """
    chat_id = generate_chat_id()  # Ties the Referer URL and payload "id" together
    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")
    # NOTE(review): MODEL_REFERERS is intentionally no longer consulted here;
    # the Referer is built from the generated chat_id (the old lookup was
    # dead code — computed but never used).
    referer_url = f"{BASE_URL}/chat/{chat_id}?model={request.model}"

    # Update headers with dynamic Referer
    dynamic_headers = headers.copy()
    dynamic_headers['Referer'] = referer_url

    # Introduce delay for 'o1-preview' model
    if request.model == 'o1-preview':
        delay_seconds = random.randint(1, 60)
        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview'")
        await asyncio.sleep(delay_seconds)

    json_data = {
        "id": chat_id,
        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
        "previewToken": None,
        "userId": None,
        "codeModelMode": True,
        "agentMode": agent_mode,
        "trendingAgentMode": trending_agent_mode,
        "isMicMode": False,
        "userSystemPrompt": None,
        "maxTokens": request.max_tokens,
        "playgroundTopP": request.top_p,
        "playgroundTemperature": request.temperature,
        "isChromeExt": False,
        "githubToken": None,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        # Hard-coded token the upstream expects — presumably a validation
        # key; TODO confirm where it comes from and whether it rotates.
        "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc",
        "visitFromDelta": False,
        "mobileClient": False,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
    }

    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                "POST",
                f"{BASE_URL}/api/chat",
                headers=dynamic_headers,
                json=json_data,
                timeout=100,
            ) as response:
                response.raise_for_status()
                # Bind timestamp up front so the final "stop" chunk is valid
                # even when the upstream stream yields no lines (previously
                # this raised UnboundLocalError).
                timestamp = int(datetime.now().timestamp())
                async for line in response.aiter_lines():
                    timestamp = int(datetime.now().timestamp())
                    if line:
                        content = line
                        # Drop the upstream protocol marker, if present.
                        if content.startswith("$@$v=undefined-rv1$@$"):
                            content = content[21:]
                        cleaned_content = strip_model_prefix(content, model_prefix)
                        yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"

                yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
                yield "data: [DONE]\n\n"
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request: {e}")
            raise HTTPException(status_code=500, detail=str(e))
149
+
150
# Process non-streaming response
async def process_non_streaming_response(request: ChatRequest):
    """Proxy *request* to the upstream ``/api/chat`` endpoint and return a
    complete OpenAI-style ``chat.completion`` response dict.

    Raises:
        HTTPException: with the mirrored upstream status on HTTP errors,
            or 500 on transport errors.
    """
    chat_id = generate_chat_id()  # Ties the Referer URL and payload "id" together
    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")
    # Build the Referer from chat_id, matching process_streaming_response
    # (the old MODEL_REFERERS-based path diverged from the streaming variant
    # and from the "id" sent in the payload).
    referer_url = f"{BASE_URL}/chat/{chat_id}?model={request.model}"

    # Update headers with dynamic Referer
    dynamic_headers = headers.copy()
    dynamic_headers['Referer'] = referer_url

    # Introduce delay for 'o1-preview' model
    # NOTE(review): range is 20-60 here vs 1-60 in the streaming variant —
    # looks deliberate (longer minimum for blocking calls); confirm.
    if request.model == 'o1-preview':
        delay_seconds = random.randint(20, 60)
        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview'")
        await asyncio.sleep(delay_seconds)

    json_data = {
        "id": chat_id,
        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
        "previewToken": None,
        "userId": None,
        "codeModelMode": True,
        "agentMode": agent_mode,
        "trendingAgentMode": trending_agent_mode,
        "isMicMode": False,
        "userSystemPrompt": None,
        "maxTokens": request.max_tokens,
        "playgroundTopP": request.top_p,
        "playgroundTemperature": request.temperature,
        "isChromeExt": False,
        "githubToken": None,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        # Hard-coded token the upstream expects — presumably a validation
        # key; TODO confirm where it comes from and whether it rotates.
        "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc",
        "visitFromDelta": False,
        "mobileClient": False,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
    }
    full_response = ""
    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                method="POST",
                url=f"{BASE_URL}/api/chat",
                headers=dynamic_headers,
                json=json_data,
                # Same 100s budget as the streaming variant; previously no
                # timeout was set here, so slow upstreams could hang forever.
                timeout=100,
            ) as response:
                response.raise_for_status()
                async for chunk in response.aiter_text():
                    full_response += chunk
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request: {e}")
            raise HTTPException(status_code=500, detail=str(e))
    # Drop the upstream protocol marker, if present.
    if full_response.startswith("$@$v=undefined-rv1$@$"):
        full_response = full_response[21:]

    cleaned_full_response = strip_model_prefix(full_response, model_prefix)

    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion",
        "created": int(datetime.now().timestamp()),
        "model": request.model,
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": cleaned_full_response},
                "finish_reason": "stop",
            }
        ],
        "usage": None,
    }