Update api/utils.py
api/utils.py (+23 −22)
@@ -1,28 +1,28 @@
 from datetime import datetime
 import json
-import uuid
-import asyncio
-import random
 from typing import Any, Dict, Optional
 
 import httpx
-from fastapi import HTTPException
 from api.config import (
     MODEL_MAPPING,
-
-    get_headers_chat,
-    BASE_URL,
+    headers,
     AGENT_MODE,
     TRENDING_AGENT_MODE,
+    BASE_URL,
     MODEL_PREFIXES,
     MODEL_REFERERS
 )
+from fastapi import HTTPException
 from api.models import ChatRequest
+
 from api.logger import setup_logger
 
+import uuid
+import asyncio
+import random  # Newly added imports
+
 logger = setup_logger(__name__)
 
-# Helper function to create chat completion data
 def create_chat_completion_data(
     content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
 ) -> Dict[str, Any]:
@@ -41,7 +41,6 @@ def create_chat_completion_data(
         "usage": None,
     }
 
-# Function to convert message to dictionary format with optional model prefix
 def message_to_dict(message, model_prefix: Optional[str] = None):
     if isinstance(message.content, str):
         content = message.content
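Note: the hunk elides the body of create_chat_completion_data, but the visible signature and the trailing "usage": None match the OpenAI chat.completion.chunk envelope. A minimal sketch of what such a helper plausibly returns; the id and object fields are assumptions, not shown in this diff:

    import uuid
    from typing import Any, Dict, Optional

    def create_chat_completion_data_sketch(
        content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
    ) -> Dict[str, Any]:
        # Assumed OpenAI-style chunk; only the signature and "usage": None
        # are confirmed by the hunk above.
        return {
            "id": f"chatcmpl-{uuid.uuid4()}",   # assumption
            "object": "chat.completion.chunk",  # assumption
            "created": timestamp,
            "model": model,
            "choices": [
                {
                    "index": 0,
                    "delta": {"content": content, "role": "assistant"},
                    "finish_reason": finish_reason,
                }
            ],
            "usage": None,
        }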
@@ -64,7 +63,6 @@ def message_to_dict(message, model_prefix: Optional[str] = None):
     else:
         return {"role": message.role, "content": message.content}
 
-# Function to strip model prefix from content if present
 def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
     """Remove the model prefix from the response content if present."""
     if model_prefix and content.startswith(model_prefix):
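The middle of message_to_dict is also elided; from the visible branches, string content presumably gets the optional model prefix while non-string (e.g. multimodal) content passes through unchanged. A hedged sketch of that behavior; the exact prefix join is an assumption:

    from typing import Optional

    class Msg:
        # Stand-in for the Pydantic message model on ChatRequest.
        def __init__(self, role: str, content):
            self.role, self.content = role, content

    def message_to_dict_sketch(message, model_prefix: Optional[str] = None):
        if isinstance(message.content, str):
            content = message.content
            if model_prefix:
                content = f"{model_prefix} {content}"  # assumed separator
            return {"role": message.role, "content": content}
        return {"role": message.role, "content": message.content}

    assert message_to_dict_sketch(Msg("user", "hi"), "@GPT-4o") == {
        "role": "user", "content": "@GPT-4o hi"
    }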
@@ -73,7 +71,6 @@ def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
     logger.debug("No prefix to strip from content.")
     return content
 
-# Process streaming response with headers from config.py
 async def process_streaming_response(request: ChatRequest):
    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
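strip_model_prefix is the inverse operation on responses; the slice itself is elided by the hunk, so the body below is an assumption consistent with the docstring:

    def strip_model_prefix_sketch(content: str, model_prefix=None) -> str:
        """Remove the model prefix from the response content if present."""
        if model_prefix and content.startswith(model_prefix):
            return content[len(model_prefix):].lstrip()  # assumed slice
        return content

    assert strip_model_prefix_sketch("@GPT-4o hello", "@GPT-4o") == "hello"
    assert strip_model_prefix_sketch("hello") == "hello"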
@@ -81,8 +78,9 @@ async def process_streaming_response(request: ChatRequest):
     referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
     referer_url = f"{BASE_URL}{referer_path}"
 
-    #
-
+    # Update headers with dynamic Referer
+    dynamic_headers = headers.copy()
+    dynamic_headers['Referer'] = referer_url
 
     # Introduce delay for 'o1-preview' model
     if request.model == 'o1-preview':
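This is the substantive change: instead of importing per-request header builders such as get_headers_chat, the module now copies one shared headers template and sets Referer per request. Copying matters because the template is module-level state shared across concurrent requests. A standalone sketch; BASE_URL and the header values are stand-ins:

    # Stand-ins; the real template lives in api.config.
    BASE_URL = "https://example.com"
    headers = {"Content-Type": "application/json"}

    def build_dynamic_headers(referer_path: str) -> dict:
        # dict.copy() is shallow, which is fine for a flat str -> str mapping;
        # mutating the copy leaves the shared template untouched.
        dynamic_headers = headers.copy()
        dynamic_headers["Referer"] = f"{BASE_URL}{referer_path}"
        return dynamic_headers

    assert "Referer" not in headers
    assert build_dynamic_headers("/?model=gpt-4o")["Referer"] == (
        "https://example.com/?model=gpt-4o"
    )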
@@ -118,7 +116,7 @@ async def process_streaming_response(request: ChatRequest):
         async with client.stream(
             "POST",
             f"{BASE_URL}/api/chat",
-            headers=
+            headers=dynamic_headers,
             json=json_data,
             timeout=100,
         ) as response:
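For reference, client.stream is httpx's streaming request API: it returns an async context manager, and aiter_text() yields the body incrementally without buffering it. A self-contained sketch of the call shape used above, with a stand-in URL:

    import asyncio
    import httpx

    async def stream_chat(json_data: dict, dynamic_headers: dict) -> None:
        async with httpx.AsyncClient() as client:
            async with client.stream(
                "POST",
                "https://example.com/api/chat",  # stand-in for f"{BASE_URL}/api/chat"
                headers=dynamic_headers,
                json=json_data,
                timeout=100,
            ) as response:
                response.raise_for_status()
                async for chunk in response.aiter_text():
                    print(chunk, end="")  # the real handler re-wraps chunks as SSE

    # asyncio.run(stream_chat({"messages": []}, {"Content-Type": "application/json"}))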
@@ -142,18 +140,22 @@
         logger.error(f"Error occurred during request: {e}")
         raise HTTPException(status_code=500, detail=str(e))
 
-# Process non-streaming response with headers from config.py
 async def process_non_streaming_response(request: ChatRequest):
     agent_mode = AGENT_MODE.get(request.model, {})
     trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
     model_prefix = MODEL_PREFIXES.get(request.model, "")
     referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
     referer_url = f"{BASE_URL}{referer_path}"
-    chat_url = f"{BASE_URL}/chat/{uuid.uuid4()}?model={request.model}"
 
-    #
-
-
+    # Update headers with dynamic Referer
+    dynamic_headers = headers.copy()
+    dynamic_headers['Referer'] = referer_url
+
+    # Introduce delay for 'o1-preview' model
+    if request.model == 'o1-preview':
+        delay_seconds = random.randint(20, 60)
+        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview'")
+        await asyncio.sleep(delay_seconds)
 
     json_data = {
         "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
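This hunk also drops the now-unused chat_url (built with uuid.uuid4()), even though uuid stays imported at the top of the file, and it duplicates the 'o1-preview' delay block that already exists in process_streaming_response. If that block needs to change again, it could be factored into a shared helper along these lines (the helper name is hypothetical):

    import asyncio
    import logging
    import random

    logger = logging.getLogger(__name__)

    async def maybe_delay_for_model(model: str) -> None:
        # Hypothetical helper mirroring the block duplicated in both handlers.
        if model == "o1-preview":
            delay_seconds = random.randint(20, 60)
            logger.info(
                f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview'"
            )
            await asyncio.sleep(delay_seconds)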
@@ -177,12 +179,11 @@ async def process_non_streaming_response(request: ChatRequest):
         "mobileClient": False,
         "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
     }
-
     full_response = ""
     async with httpx.AsyncClient() as client:
         try:
             async with client.stream(
-                method="POST", url=f"{BASE_URL}/api/chat", headers=
+                method="POST", url=f"{BASE_URL}/api/chat", headers=dynamic_headers, json=json_data
             ) as response:
                 response.raise_for_status()
                 async for chunk in response.aiter_text():
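The non-streaming path still streams from upstream but concatenates chunks into full_response before returning a single completion object; the final hunk below shows the tail of that object. A sketch of the aggregation pattern; the upstream wire format and the finish_reason value are assumptions:

    def aggregate_chunks(chunks) -> dict:
        # Mirrors the aiter_text() loop: join text chunks, then wrap the
        # result in one OpenAI-style completion body.
        full_response = "".join(chunks)
        return {
            "choices": [
                {
                    "index": 0,
                    "message": {"role": "assistant", "content": full_response},
                    "finish_reason": "stop",  # assumption
                }
            ],
            "usage": None,
        }

    assert aggregate_chunks(["Hel", "lo"])["choices"][0]["message"]["content"] == "Hello"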
@@ -212,4 +213,4 @@ async def process_non_streaming_response(request: ChatRequest):
             }
         ],
         "usage": None,
-    }
+    }