Niansuh committed on
Commit
ba990bd
·
verified ·
1 Parent(s): 8f48279

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +28 -29
api/utils.py CHANGED
@@ -9,13 +9,16 @@ from typing import Any, Dict, Optional
9
  import httpx
10
  from fastapi import HTTPException
11
  from api.config import (
12
- models,
13
- model_aliases,
14
- agentMode,
15
- trendingAgentMode,
16
- get_headers_api_chat,
17
  BASE_URL,
 
 
18
  )
 
 
 
 
19
 
20
  # Helper function to create a random alphanumeric chat ID
21
  def generate_chat_id(length: int = 7) -> str:
@@ -45,7 +48,6 @@ def create_chat_completion_data(
45
  def message_to_dict(message):
46
  content = message.content if isinstance(message.content, str) else message.content[0]["text"]
47
  if isinstance(message.content, list) and len(message.content) == 2 and "image_url" in message.content[1]:
48
- # Ensure base64 images are always included for all models
49
  return {
50
  "role": message.role,
51
  "content": content,
@@ -58,20 +60,21 @@ def message_to_dict(message):
58
  return {"role": message.role, "content": content}
59
 
60
  # Process streaming response with headers from config.py
61
- async def process_streaming_response(request):
62
  chat_id = generate_chat_id()
63
- logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model}")
64
-
65
- agent_mode = agentMode.get(request.model, {})
66
- trending_agent_mode = trendingAgentMode.get(request.model, {})
67
 
68
- headers_api_chat = get_headers_api_chat(f"{BASE_URL}/?model={request.model}")
 
 
 
69
 
70
  json_data = {
71
  "agentMode": agent_mode,
72
  "clickedAnswer2": False,
73
  "clickedAnswer3": False,
74
- "clickedForceWebSearch ": False,
75
  "codeModelMode": True,
76
  "githubToken": None,
77
  "id": chat_id,
@@ -85,7 +88,7 @@ async def process_streaming_response(request):
85
  "previewToken": None,
86
  "trendingAgentMode": trending_agent_mode,
87
  "userId": None,
88
- "userSelectedModel": request.model,
89
  "userSystemPrompt": None,
90
  "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc",
91
  "visitFromDelta": False,
@@ -96,7 +99,7 @@ async def process_streaming_response(request):
96
  async with client.stream(
97
  "POST",
98
  f"{BASE_URL}/api/chat",
99
- headers=headers_api_chat,
100
  json=json_data,
101
  timeout=100,
102
  ) as response:
@@ -105,10 +108,7 @@ async def process_streaming_response(request):
105
  timestamp = int(datetime.now().timestamp())
106
  if line:
107
  content = line
108
- if content.startswith("$@$v=undefined-rv1$@$"):
109
- content = content[21:]
110
  yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
111
-
112
  yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
113
  yield "data: [DONE]\n\n"
114
  except httpx.HTTPStatusError as e:
@@ -119,14 +119,15 @@ async def process_streaming_response(request):
119
  raise HTTPException(status_code=500, detail=str(e))
120
 
121
  # Process non-streaming response with headers from config.py
122
- async def process_non_streaming_response(request):
123
  chat_id = generate_chat_id()
124
- logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model}")
125
-
126
- agent_mode = agentMode.get(request.model, {})
127
- trending_agent_mode = trendingAgentMode.get(request.model, {})
128
 
129
- headers_api_chat = get_headers_api_chat(f"{BASE_URL}/?model={request.model}")
 
 
 
130
 
131
  json_data = {
132
  "agentMode": agent_mode,
@@ -146,7 +147,7 @@ async def process_non_streaming_response(request):
146
  "previewToken": None,
147
  "trendingAgentMode": trending_agent_mode,
148
  "userId": None,
149
- "userSelectedModel": request.model,
150
  "userSystemPrompt": None,
151
  "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc",
152
  "visitFromDelta": False,
@@ -156,7 +157,7 @@ async def process_non_streaming_response(request):
156
  async with httpx.AsyncClient() as client:
157
  try:
158
  async with client.stream(
159
- method="POST", url=f"{BASE_URL}/api/chat", headers=headers_api_chat, json=json_data
160
  ) as response:
161
  response.raise_for_status()
162
  async for chunk in response.aiter_text():
@@ -167,8 +168,6 @@ async def process_non_streaming_response(request):
167
  except httpx.RequestError as e:
168
  logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
169
  raise HTTPException(status_code=500, detail=str(e))
170
- if full_response.startswith("$@$v=undefined-rv1$@$"):
171
- full_response = full_response[21:]
172
 
173
  return {
174
  "id": f"chatcmpl-{uuid.uuid4()}",
@@ -183,4 +182,4 @@ async def process_non_streaming_response(request):
183
  }
184
  ],
185
  "usage": None,
186
- }
 
9
  import httpx
10
  from fastapi import HTTPException
11
  from api.config import (
12
+ MODEL_MAPPING,
13
+ common_headers,
 
 
 
14
  BASE_URL,
15
+ AGENT_MODE,
16
+ TRENDING_AGENT_MODE,
17
  )
18
+ from api.models import ChatRequest
19
+ from api.logger import setup_logger
20
+
21
+ logger = setup_logger(__name__)
22
 
23
  # Helper function to create a random alphanumeric chat ID
24
  def generate_chat_id(length: int = 7) -> str:
 
48
  def message_to_dict(message):
49
  content = message.content if isinstance(message.content, str) else message.content[0]["text"]
50
  if isinstance(message.content, list) and len(message.content) == 2 and "image_url" in message.content[1]:
 
51
  return {
52
  "role": message.role,
53
  "content": content,
 
60
  return {"role": message.role, "content": content}
61
 
62
  # Process streaming response with headers from config.py
63
+ async def process_streaming_response(request: ChatRequest):
64
  chat_id = generate_chat_id()
65
+ agent_mode = AGENT_MODE.get(request.model, {})
66
+ trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
 
 
67
 
68
+ if request.model == 'o1-preview':
69
+ delay_seconds = random.randint(1, 60)
70
+ logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
71
+ await asyncio.sleep(delay_seconds)
72
 
73
  json_data = {
74
  "agentMode": agent_mode,
75
  "clickedAnswer2": False,
76
  "clickedAnswer3": False,
77
+ "clickedForceWebSearch": False,
78
  "codeModelMode": True,
79
  "githubToken": None,
80
  "id": chat_id,
 
88
  "previewToken": None,
89
  "trendingAgentMode": trending_agent_mode,
90
  "userId": None,
91
+ "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
92
  "userSystemPrompt": None,
93
  "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc",
94
  "visitFromDelta": False,
 
99
  async with client.stream(
100
  "POST",
101
  f"{BASE_URL}/api/chat",
102
+ headers=common_headers,
103
  json=json_data,
104
  timeout=100,
105
  ) as response:
 
108
  timestamp = int(datetime.now().timestamp())
109
  if line:
110
  content = line
 
 
111
  yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
 
112
  yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
113
  yield "data: [DONE]\n\n"
114
  except httpx.HTTPStatusError as e:
 
119
  raise HTTPException(status_code=500, detail=str(e))
120
 
121
  # Process non-streaming response with headers from config.py
122
+ async def process_non_streaming_response(request: ChatRequest):
123
  chat_id = generate_chat_id()
124
+ agent_mode = AGENT_MODE.get(request.model, {})
125
+ trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
 
 
126
 
127
+ if request.model == 'o1-preview':
128
+ delay_seconds = random.randint(20, 60)
129
+ logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
130
+ await asyncio.sleep(delay_seconds)
131
 
132
  json_data = {
133
  "agentMode": agent_mode,
 
147
  "previewToken": None,
148
  "trendingAgentMode": trending_agent_mode,
149
  "userId": None,
150
+ "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
151
  "userSystemPrompt": None,
152
  "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc",
153
  "visitFromDelta": False,
 
157
  async with httpx.AsyncClient() as client:
158
  try:
159
  async with client.stream(
160
+ method="POST", url=f"{BASE_URL}/api/chat", headers=common_headers, json=json_data
161
  ) as response:
162
  response.raise_for_status()
163
  async for chunk in response.aiter_text():
 
168
  except httpx.RequestError as e:
169
  logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
170
  raise HTTPException(status_code=500, detail=str(e))
 
 
171
 
172
  return {
173
  "id": f"chatcmpl-{uuid.uuid4()}",
 
182
  }
183
  ],
184
  "usage": None,
185
+ }