Update api/utils.py

api/utils.py  CHANGED  (+8 -56)
@@ -3,18 +3,15 @@ import json
 import uuid
 import asyncio
 import random
-import string
 from typing import Any, Dict, Optional
 
 import httpx
 from fastapi import HTTPException
 from api.config import (
     get_headers_api_chat,
-    get_headers_chat,
     BASE_URL,
     AGENT_MODE,
     TRENDING_AGENT_MODE,
-    ALLOWED_MODELS
 )
 from api.models import ChatRequest
 from api.logger import setup_logger
@@ -26,54 +23,16 @@ def generate_chat_id(length: int = 7) -> str:
     characters = string.ascii_letters + string.digits
     return ''.join(random.choices(characters, k=length))
 
-# Function to create chat completion data
-def create_chat_completion_data(
-    content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
-) -> Dict[str, Any]:
-    return {
-        "id": f"chatcmpl-{uuid.uuid4()}",
-        "object": "chat.completion.chunk",
-        "created": timestamp,
-        "model": model,
-        "choices": [
-            {
-                "index": 0,
-                "delta": {"content": content, "role": "assistant"},
-                "finish_reason": finish_reason,
-            }
-        ],
-        "usage": None,
-    }
-
-# Function to convert message to dictionary format, ensuring base64 data and optional model prefix
-def message_to_dict(message):
-    content = message.content if isinstance(message.content, str) else message.content[0]["text"]
-    if isinstance(message.content, list) and len(message.content) == 2 and "image_url" in message.content[1]:
-        # Ensure base64 images are always included for all models
-        return {
-            "role": message.role,
-            "content": content,
-            "data": {
-                "imageBase64": message.content[1]["image_url"]["url"],
-                "fileText": "",
-                "title": "snapshot",
-            },
-        }
-    return {"role": message.role, "content": content}
-
-# Function to strip model prefix from content if present
-def strip_model_prefix(content: str) -> str:
-    return content.strip()
+# Function to get the correct referer URL for logging
+def get_referer_url(chat_id: str, model: str) -> str:
+    return f"{BASE_URL}/chat/{chat_id}"
 
 # Process streaming response with headers from config.py
 async def process_streaming_response(request: ChatRequest):
     chat_id = generate_chat_id()
-    referer_url =
+    referer_url = get_referer_url(chat_id, request.model)
     logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
 
-    if request.model not in ALLOWED_MODELS:
-        raise HTTPException(status_code=400, detail="Invalid model specified.")
-
     agent_mode = AGENT_MODE.get(request.model, {})
     trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
 
@@ -124,8 +83,7 @@ async def process_streaming_response(request: ChatRequest):
             content = line
             if content.startswith("$@$v=undefined-rv1$@$"):
                 content = content[21:]
-            cleaned_content = strip_model_prefix(content)
-            yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
+            yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
 
         yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
         yield "data: [DONE]\n\n"
@@ -139,17 +97,13 @@ async def process_streaming_response(request: ChatRequest):
 # Process non-streaming response with headers from config.py
 async def process_non_streaming_response(request: ChatRequest):
     chat_id = generate_chat_id()
-    referer_url =
+    referer_url = get_referer_url(chat_id, request.model)
     logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
 
-    if request.model not in ALLOWED_MODELS:
-        raise HTTPException(status_code=400, detail="Invalid model specified.")
-
     agent_mode = AGENT_MODE.get(request.model, {})
     trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
 
     headers_api_chat = get_headers_api_chat(referer_url)
-    headers_chat = get_headers_chat(referer_url, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""]))
 
     if request.model == 'o1-preview':
         delay_seconds = random.randint(20, 60)
@@ -198,8 +152,6 @@ async def process_non_streaming_response(request: ChatRequest):
     if full_response.startswith("$@$v=undefined-rv1$@$"):
         full_response = full_response[21:]
 
-    cleaned_full_response = strip_model_prefix(full_response)
-
     return {
         "id": f"chatcmpl-{uuid.uuid4()}",
         "object": "chat.completion",
@@ -208,9 +160,9 @@ async def process_non_streaming_response(request: ChatRequest):
         "choices": [
             {
                 "index": 0,
-                "message": {"role": "assistant", "content": cleaned_full_response},
+                "message": {"role": "assistant", "content": full_response},
                 "finish_reason": "stop",
             }
         ],
         "usage": None,
-    }
+    }
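
For reference, the removed create_chat_completion_data helper built OpenAI-style chat.completion.chunk payloads, and the streaming path still calls a function of that name, so the deleted copy was presumably a duplicate definition. A minimal sketch of one streamed event as this code frames it; the model name and content are illustrative, not taken from the diff:

import json
import time
import uuid

# Hypothetical values for illustration only; the real code takes these
# from the incoming ChatRequest and the upstream response.
timestamp = int(time.time())
chunk = {
    "id": f"chatcmpl-{uuid.uuid4()}",
    "object": "chat.completion.chunk",
    "created": timestamp,
    "model": "gpt-4o",  # assumed model name
    "choices": [
        {
            "index": 0,
            "delta": {"content": "Hello", "role": "assistant"},
            "finish_reason": None,
        }
    ],
    "usage": None,
}

# The streaming generator frames each chunk as a server-sent event:
print(f"data: {json.dumps(chunk)}\n\n", end="")
# ...and terminates the stream with:
print("data: [DONE]\n\n", end="")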
|
|
3 |
import uuid
|
4 |
import asyncio
|
5 |
import random
|
|
|
6 |
from typing import Any, Dict, Optional
|
7 |
|
8 |
import httpx
|
9 |
from fastapi import HTTPException
|
10 |
from api.config import (
|
11 |
get_headers_api_chat,
|
|
|
12 |
BASE_URL,
|
13 |
AGENT_MODE,
|
14 |
TRENDING_AGENT_MODE,
|
|
|
15 |
)
|
16 |
from api.models import ChatRequest
|
17 |
from api.logger import setup_logger
|
|
|
23 |
characters = string.ascii_letters + string.digits
|
24 |
return ''.join(random.choices(characters, k=length))
|
25 |
|
26 |
+
# Function to get the correct referer URL for logging
|
27 |
+
def get_referer_url(chat_id: str, model: str) -> str:
|
28 |
+
return f"{ BASE_URL}/chat/{chat_id}"
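
On the new helper: get_referer_url accepts a model argument it never reads, so the referer depends only on chat_id. A standalone sketch with BASE_URL stubbed, since the real value lives in api/config.py:

BASE_URL = "https://example.invalid"  # stub; the real value comes from api.config

def get_referer_url(chat_id: str, model: str) -> str:
    # model is currently unused; kept, presumably, so call sites
    # stay stable if per-model referers return later
    return f"{BASE_URL}/chat/{chat_id}"

assert get_referer_url("abc1234", "o1-preview") == "https://example.invalid/chat/abc1234"

Separately, note that the first hunk drops import string while generate_chat_id still references string.ascii_letters; unless the import survives elsewhere in the file, that is a latent NameError.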
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
29 |
|
30 |
# Process streaming response with headers from config.py
|
31 |
async def process_streaming_response(request: ChatRequest):
|
32 |
chat_id = generate_chat_id()
|
33 |
+
referer_url = get_referer_url(chat_id, request.model)
|
34 |
logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
|
35 |
|
|
|
|
|
|
|
36 |
agent_mode = AGENT_MODE.get(request.model, {})
|
37 |
trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
|
38 |
|
|
|
83 |
content = line
|
84 |
if content.startswith("$@$v=undefined-rv1$@$"):
|
85 |
content = content[21:]
|
86 |
+
yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
|
|
|
87 |
|
88 |
yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
|
89 |
yield "data: [DONE]\n\n"
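
The content[21:] slice in both response paths works only because len("$@$v=undefined-rv1$@$") is exactly 21, and it would silently misbehave if the upstream marker ever changed length. On Python 3.9+, str.removeprefix states the same intent without the magic number. A sketch of the equivalent, not what the diff itself does:

PREFIX = "$@$v=undefined-rv1$@$"
assert len(PREFIX) == 21  # the magic number behind content[21:]

def strip_marker(content: str) -> str:
    # Equivalent to the startswith/[21:] pair above, minus the magic number
    return content.removeprefix(PREFIX)

assert strip_marker(PREFIX + "hello") == "hello"
assert strip_marker("hello") == "hello"  # non-prefixed input passes through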
|
|
|
97 |
# Process non-streaming response with headers from config.py
|
98 |
async def process_non_streaming_response(request: ChatRequest):
|
99 |
chat_id = generate_chat_id()
|
100 |
+
referer_url = get_referer_url(chat_id, request.model)
|
101 |
logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
|
102 |
|
|
|
|
|
|
|
103 |
agent_mode = AGENT_MODE.get(request.model, {})
|
104 |
trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
|
105 |
|
106 |
headers_api_chat = get_headers_api_chat(referer_url)
|
|
|
107 |
|
108 |
if request.model == 'o1-preview':
|
109 |
delay_seconds = random.randint(20, 60)
|
|
|
152 |
if full_response.startswith("$@$v=undefined-rv1$@$"):
|
153 |
full_response = full_response[21:]
|
154 |
|
|
|
|
|
155 |
return {
|
156 |
"id": f"chatcmpl-{uuid.uuid4()}",
|
157 |
"object": "chat.completion",
|
|
|
160 |
"choices": [
|
161 |
{
|
162 |
"index": 0,
|
163 |
+
"message": {"role": "assistant", "content": full_response},
|
164 |
"finish_reason": "stop",
|
165 |
}
|
166 |
],
|
167 |
"usage": None,
|
168 |
+
}
|
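
Finally, a minimal client for the streaming path. Only the "data: ..." / "data: [DONE]" framing comes from the code above; the route, port, and payload shape are assumptions to make the sketch self-contained:

import json

import httpx

async def consume_stream() -> str:
    # Assumed route and request body; adjust to the actual FastAPI app.
    url = "http://localhost:8000/v1/chat/completions"
    payload = {
        "model": "gpt-4o",
        "messages": [{"role": "user", "content": "Hi"}],
        "stream": True,
    }
    parts = []
    async with httpx.AsyncClient(timeout=None) as client:
        async with client.stream("POST", url, json=payload) as response:
            async for line in response.aiter_lines():
                if not line.startswith("data: "):
                    continue  # skip SSE keep-alives and blank separators
                data = line[len("data: "):]
                if data == "[DONE]":
                    break
                chunk = json.loads(data)
                parts.append(chunk["choices"][0]["delta"].get("content") or "")
    return "".join(parts)

# Usage: asyncio.run(consume_stream())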