Update api/utils.py
api/utils.py (+7 -28)
@@ -9,11 +9,11 @@ import httpx
 from fastapi import HTTPException
 from api.config import (
     MODEL_MAPPING,
-    get_headers_api_chat,
     BASE_URL,
     AGENT_MODE,
     TRENDING_AGENT_MODE,
     MODEL_PREFIXES,
+    headers,
 )
 from api.models import ChatRequest
 from api.logger import setup_logger
@@ -23,7 +23,6 @@ from api import validate
 
 logger = setup_logger(__name__)
 
-
 # Helper function to create chat completion data
 def create_chat_completion_data(
     content: str,
@@ -46,7 +45,6 @@ def create_chat_completion_data(
         "usage": None,
     }
 
-
 # Function to convert message to dictionary format, ensuring base64 data and optional model prefix
 def message_to_dict(message, model_prefix: Optional[str] = None):
     content = (
@@ -73,7 +71,6 @@ def message_to_dict(message, model_prefix: Optional[str] = None):
         }
     return {"role": message.role, "content": content}
 
-
 # Function to strip model prefix from content if present
 def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
     """Remove the model prefix from the response content if present."""
@@ -82,26 +79,14 @@ def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
         return content[len(model_prefix):].strip()
     return content
 
-
-# Function to get the referer URL
-def get_referer_url(model: str) -> str:
-    """Generate the referer URL based on the model."""
-    return BASE_URL
-
-
-# Process streaming response with headers from config.py
+# Process streaming response
 async def process_streaming_response(request: ChatRequest):
-
-    logger.info(
-        f"Model: {request.model} - URL: {referer_url}"
-    )
+    logger.info(f"Model: {request.model}")
 
     agent_mode = AGENT_MODE.get(request.model, {})
     trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
     model_prefix = MODEL_PREFIXES.get(request.model, "")
 
-    headers_api_chat = get_headers_api_chat(referer_url)
-
     if request.model == 'o1-preview':
         delay_seconds = random.randint(1, 60)
         logger.info(
@@ -139,7 +124,7 @@ async def process_streaming_response(request: ChatRequest):
         async with client.stream(
             "POST",
             f"{BASE_URL}/api/chat",
-            headers=headers_api_chat,
+            headers=headers,
             json=json_data,
             timeout=100,
         ) as response:
@@ -175,20 +160,14 @@ async def process_streaming_response(request: ChatRequest):
         logger.error(f"Error occurred during request: {e}")
         raise HTTPException(status_code=500, detail=str(e))
 
-
-# Process non-streaming response with headers from config.py
+# Process non-streaming response
 async def process_non_streaming_response(request: ChatRequest):
-
-    logger.info(
-        f"Model: {request.model} - URL: {referer_url}"
-    )
+    logger.info(f"Model: {request.model}")
 
     agent_mode = AGENT_MODE.get(request.model, {})
     trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
     model_prefix = MODEL_PREFIXES.get(request.model, "")
 
-    headers_api_chat = get_headers_api_chat(referer_url)
-
     if request.model == 'o1-preview':
         delay_seconds = random.randint(20, 60)
         logger.info(
@@ -226,7 +205,7 @@ async def process_non_streaming_response(request: ChatRequest):
         try:
             response = await client.post(
                 url=f"{BASE_URL}/api/chat",
-                headers=headers_api_chat,
+                headers=headers,
                 json=json_data,
             )
             response.raise_for_status()
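
After this commit, both request paths pull their headers from a single `headers` constant in `api.config` instead of computing them per request via the deleted `get_referer_url` and `get_headers_api_chat` helpers. The diff confirms only the name of that constant, not its definition; a minimal sketch of what it might look like, with every value below assumed for illustration:

    # api/config.py -- hypothetical sketch; this commit shows only that a
    # module-level `headers` object is importable from here.
    BASE_URL = "https://upstream.example"  # placeholder; real value not shown

    # One shared, static header dict. Because it no longer varies with the
    # request model, it can be passed to the httpx calls as-is.
    headers = {
        "Content-Type": "application/json",
        "Origin": BASE_URL,
        "Referer": f"{BASE_URL}/",
    }

Since the deleted `get_referer_url` always returned `BASE_URL` regardless of the model, the per-request indirection computed nothing model-specific, which is what lets the commit drop 28 lines while adding only 7.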
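
For context, the unchanged lines around the edited `headers=` arguments show the httpx calling pattern the two handlers use. A self-contained sketch of the streaming variant, assuming placeholder values for the URL and payload (neither appears in this commit):

    import asyncio

    import httpx

    # Stand-in for the `headers` constant imported from api.config above.
    headers = {"Content-Type": "application/json"}

    async def stream_chat(json_data: dict) -> None:
        # Mirrors the call shape in the diff: client.stream("POST", url,
        # headers=headers, json=json_data, timeout=100).
        async with httpx.AsyncClient() as client:
            async with client.stream(
                "POST",
                "https://upstream.example/api/chat",  # placeholder URL
                headers=headers,
                json=json_data,
                timeout=100,
            ) as response:
                response.raise_for_status()
                async for chunk in response.aiter_text():
                    print(chunk, end="")

    if __name__ == "__main__":
        asyncio.run(stream_chat({"messages": []}))

The non-streaming path is the same idea with a single `await client.post(...)` followed by `response.raise_for_status()`, as the last hunk shows.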