Update api/utils.py

api/utils.py CHANGED (+12 -32)
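This change removes the per-request chat-ID machinery from api/utils.py: the MODEL_REFERERS import, the generate_chat_id helper, and get_referer_url are deleted, the "id": chat_id field is dropped from both request payloads, and the log messages no longer reference a chat ID. Both process_streaming_response and process_non_streaming_response now build their request headers directly from BASE_URL and log only the requested model.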
@@ -17,18 +17,12 @@ from api.config import (
     AGENT_MODE,
     TRENDING_AGENT_MODE,
     MODEL_PREFIXES,
-    MODEL_REFERERS
 )
 from api.models import ChatRequest
 from api.logger import setup_logger
 
 logger = setup_logger(__name__)
 
-# Helper function to create a random alphanumeric chat ID
-def generate_chat_id(length: int = 7) -> str:
-    characters = string.ascii_letters + string.digits
-    return ''.join(random.choices(characters, k=length))
-
 # Helper function to create chat completion data
 def create_chat_completion_data(
     content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
@@ -74,31 +68,21 @@ def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
         return content[len(model_prefix):].strip()
     return content
 
-# Function to get the correct referer URL for logging
-def get_referer_url(chat_id: str, model: str) -> str:
-    """Generate the referer URL based on specific models listed in MODEL_REFERERS."""
-    if model in MODEL_REFERERS:
-        return f"{BASE_URL}/chat/{chat_id}?model={model}"
-    return BASE_URL
-
 # Process streaming response with headers from config.py
 async def process_streaming_response(request: ChatRequest):
-    chat_id = generate_chat_id()
-    referer_url = get_referer_url(chat_id, request.model)
-    logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
+    logger.info(f"Processing streaming response for Model: {request.model}")
 
     agent_mode = AGENT_MODE.get(request.model, {})
     trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
     model_prefix = MODEL_PREFIXES.get(request.model, "")
 
-    headers_api_chat = get_headers_api_chat(referer_url)
+    headers_api_chat = get_headers_api_chat(BASE_URL)
     validated_token = validate.getHid()  # Get the validated token from validate.py
     logger.info(f"Retrieved validated token: {validated_token}")
 
-
     if request.model == 'o1-preview':
         delay_seconds = random.randint(1, 60)
-        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
+        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview'")
         await asyncio.sleep(delay_seconds)
 
     json_data = {
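For context, the deleted helpers meant that only models listed in MODEL_REFERERS got a per-chat referer URL; every other model already fell back to BASE_URL. Here is a minimal, runnable sketch of the removed behavior, using hypothetical config values rather than the project's real api.config module:

import random
import string

BASE_URL = "https://example.com"        # hypothetical; the real value comes from api.config
MODEL_REFERERS = {"gpt-4o": "/gpt-4o"}  # hypothetical entry

def generate_chat_id(length: int = 7) -> str:
    # the removed helper: a random alphanumeric chat ID
    characters = string.ascii_letters + string.digits
    return ''.join(random.choices(characters, k=length))

def get_referer_url(chat_id: str, model: str) -> str:
    # the removed routing: a per-chat URL only for models in MODEL_REFERERS
    if model in MODEL_REFERERS:
        return f"{BASE_URL}/chat/{chat_id}?model={model}"
    return BASE_URL

chat_id = generate_chat_id()
print(get_referer_url(chat_id, "gpt-4o"))  # e.g. https://example.com/chat/aB3xY9k?model=gpt-4o
print(get_referer_url(chat_id, "other"))   # https://example.com

After this commit, both code paths pass BASE_URL straight to get_headers_api_chat, so the chat ID no longer influences the outgoing headers.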
@@ -108,7 +92,6 @@ async def process_streaming_response(request: ChatRequest):
         "clickedForceWebSearch": False,
         "codeModelMode": True,
         "githubToken": None,
-        "id": chat_id,
         "isChromeExt": False,
         "isMicMode": False,
         "maxTokens": request.max_tokens,
@@ -147,29 +130,27 @@ async def process_streaming_response(request: ChatRequest):
                 yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
                 yield "data: [DONE]\n\n"
         except httpx.HTTPStatusError as e:
-            logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
+            logger.error(f"HTTP error occurred: {e}")
             raise HTTPException(status_code=e.response.status_code, detail=str(e))
         except httpx.RequestError as e:
-            logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
+            logger.error(f"Error occurred during request: {e}")
             raise HTTPException(status_code=500, detail=str(e))
 
 # Process non-streaming response with headers from config.py
 async def process_non_streaming_response(request: ChatRequest):
-    chat_id = generate_chat_id()
-    referer_url = get_referer_url(chat_id, request.model)
-    logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
+    logger.info(f"Processing non-streaming response for Model: {request.model}")
 
     agent_mode = AGENT_MODE.get(request.model, {})
     trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
     model_prefix = MODEL_PREFIXES.get(request.model, "")
 
-    headers_api_chat = get_headers_api_chat(referer_url)
-    headers_chat = get_headers_chat(referer_url, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""]))
+    headers_api_chat = get_headers_api_chat(BASE_URL)
+    headers_chat = get_headers_chat(BASE_URL, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""]))
     validated_token = validate.getHid()  # Get the validated token from validate.py
 
     if request.model == 'o1-preview':
         delay_seconds = random.randint(20, 60)
-        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
+        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview'")
         await asyncio.sleep(delay_seconds)
 
     json_data = {
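The yield lines at the top of the previous hunk emit Server-Sent Events: one JSON chunk per "data:" line, terminated by "data: [DONE]". The payload shape of create_chat_completion_data is not visible in these hunks, so the dict below is an assumed OpenAI-style completion chunk, for illustration only:

import json
import time

def create_chat_completion_data(content, model, timestamp, finish_reason=None):
    # assumed shape of the helper defined earlier in api/utils.py
    return {
        "id": f"chatcmpl-{timestamp}",
        "object": "chat.completion.chunk",
        "created": timestamp,
        "model": model,
        "choices": [
            {"index": 0, "delta": {"content": content}, "finish_reason": finish_reason}
        ],
    }

timestamp = int(time.time())
# the final empty chunk carries finish_reason="stop"; [DONE] closes the stream
print(f"data: {json.dumps(create_chat_completion_data('', 'gpt-4o', timestamp, 'stop'))}\n")
print("data: [DONE]\n")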
@@ -179,7 +160,6 @@ async def process_non_streaming_response(request: ChatRequest):
         "clickedForceWebSearch": False,
         "codeModelMode": True,
         "githubToken": None,
-        "id": chat_id,
         "isChromeExt": False,
         "isMicMode": False,
         "maxTokens": request.max_tokens,
@@ -206,10 +186,10 @@ async def process_non_streaming_response(request: ChatRequest):
             async for chunk in response.aiter_text():
                 full_response += chunk
         except httpx.HTTPStatusError as e:
-            logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
+            logger.error(f"HTTP error occurred: {e}")
             raise HTTPException(status_code=e.response.status_code, detail=str(e))
         except httpx.RequestError as e:
-            logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
+            logger.error(f"Error occurred during request: {e}")
             raise HTTPException(status_code=500, detail=str(e))
     if full_response.startswith("$@$v=undefined-rv1$@$"):
         full_response = full_response[21:]
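The magic number in full_response[21:] above is just len("$@$v=undefined-rv1$@$"). This commit leaves it untouched, but an equivalent, self-documenting form (Python 3.9+) would be:

MARKER = "$@$v=undefined-rv1$@$"
assert len(MARKER) == 21  # matches the hard-coded slice in the hunk above

full_response = MARKER + "Hello"
full_response = full_response.removeprefix(MARKER)  # no-op when the marker is absent
assert full_response == "Hello"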
@@ -229,4 +209,4 @@ async def process_non_streaming_response(request: ChatRequest):
             }
         ],
         "usage": None,
-    }
+    }
|