Niansuh committed on
Commit
ae86464
·
verified ·
1 Parent(s): 5d00ccd

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +47 -45
api/utils.py CHANGED
@@ -3,15 +3,17 @@ import json
3
  import uuid
4
  import asyncio
5
  import random
 
6
  from typing import Any, Dict, Optional
7
 
8
  import httpx
9
  from fastapi import HTTPException
10
  from api.config import (
11
- get_headers_api_chat,
 
12
  BASE_URL,
13
  AGENT_MODE,
14
- TRENDING_AGENT_MODE,
15
  )
16
  from api.models import ChatRequest
17
  from api.logger import setup_logger
@@ -23,25 +25,47 @@ def generate_chat_id(length: int = 7) -> str:
23
  characters = string.ascii_letters + string.digits
24
  return ''.join(random.choices(characters, k=length))
25
 
26
- # Function to get the correct referer URL for logging
27
- def get_referer_url(chat_id: str, model: str) -> str:
28
- return f"{ BASE_URL}/chat/{chat_id}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
 
30
- # Process streaming response with headers from config.py
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
  async def process_streaming_response(request: ChatRequest):
32
  chat_id = generate_chat_id()
33
- referer_url = get_referer_url(chat_id, request.model)
34
- logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
35
-
36
  agent_mode = AGENT_MODE.get(request.model, {})
37
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
38
 
39
- headers_api_chat = get_headers_api_chat(referer_url)
40
-
41
- if request.model == 'o1-preview':
42
- delay_seconds = random.randint(1, 60)
43
- logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
44
- await asyncio.sleep(delay_seconds)
45
 
46
  json_data = {
47
  "agentMode": agent_mode,
@@ -61,7 +85,7 @@ async def process_streaming_response(request: ChatRequest):
61
  "previewToken": None,
62
  "trendingAgentMode": trending_agent_mode,
63
  "userId": None,
64
- "userSelectedModel": request.model,
65
  "userSystemPrompt": None,
66
  "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc",
67
  "visitFromDelta": False,
@@ -69,22 +93,12 @@ async def process_streaming_response(request: ChatRequest):
69
 
70
  async with httpx.AsyncClient() as client:
71
  try:
72
- async with client.stream(
73
- "POST",
74
- f"{BASE_URL}/api/chat",
75
- headers=headers_api_chat,
76
- json=json_data,
77
- timeout=100,
78
- ) as response:
79
  response.raise_for_status()
80
  async for line in response.aiter_lines():
81
  timestamp = int(datetime.now().timestamp())
82
  if line:
83
- content = line
84
- if content.startswith("$@$v=undefined-rv1$@$"):
85
- content = content[21:]
86
- yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
87
-
88
  yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
89
  yield "data: [DONE]\n\n"
90
  except httpx.HTTPStatusError as e:
@@ -94,21 +108,13 @@ async def process_streaming_response(request: ChatRequest):
94
  logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
95
  raise HTTPException(status_code=500, detail=str(e))
96
 
97
- # Process non-streaming response with headers from config.py
98
  async def process_non_streaming_response(request: ChatRequest):
99
  chat_id = generate_chat_id()
100
- referer_url = get_referer_url(chat_id, request.model)
101
- logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
102
-
103
  agent_mode = AGENT_MODE.get(request.model, {})
104
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
105
 
106
- headers_api_chat = get_headers_api_chat(referer_url)
107
-
108
- if request.model == 'o1-preview':
109
- delay_seconds = random.randint(20, 60)
110
- logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
111
- await asyncio.sleep(delay_seconds)
112
 
113
  json_data = {
114
  "agentMode": agent_mode,
@@ -128,7 +134,7 @@ async def process_non_streaming_response(request: ChatRequest):
128
  "previewToken": None,
129
  "trendingAgentMode": trending_agent_mode,
130
  "userId": None,
131
- "userSelectedModel": request.model,
132
  "userSystemPrompt": None,
133
  "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc",
134
  "visitFromDelta": False,
@@ -137,9 +143,7 @@ async def process_non_streaming_response(request: ChatRequest):
137
  full_response = ""
138
  async with httpx.AsyncClient() as client:
139
  try:
140
- async with client.stream(
141
- method="POST", url=f"{BASE_URL}/api/chat", headers=headers_api_chat, json=json_data
142
- ) as response:
143
  response.raise_for_status()
144
  async for chunk in response.aiter_text():
145
  full_response += chunk
@@ -149,8 +153,6 @@ async def process_non_streaming_response(request: ChatRequest):
149
  except httpx.RequestError as e:
150
  logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
151
  raise HTTPException(status_code=500, detail=str(e))
152
- if full_response.startswith("$@$v=undefined-rv1$@$"):
153
- full_response = full_response[21:]
154
 
155
  return {
156
  "id": f"chatcmpl-{uuid.uuid4()}",
@@ -165,4 +167,4 @@ async def process_non_streaming_response(request: ChatRequest):
165
  }
166
  ],
167
  "usage": None,
168
- }
 
3
  import uuid
4
  import asyncio
5
  import random
6
+ import string
7
  from typing import Any, Dict, Optional
8
 
9
  import httpx
10
  from fastapi import HTTPException
11
  from api.config import (
12
+ MODEL_MAPPING,
13
+ get_headers,
14
  BASE_URL,
15
  AGENT_MODE,
16
+ TRENDING_AGENT_MODE
17
  )
18
  from api.models import ChatRequest
19
  from api.logger import setup_logger
 
25
  characters = string.ascii_letters + string.digits
26
  return ''.join(random.choices(characters, k=length))
27
 
28
# Helper function to build one OpenAI-style chat.completion.chunk payload.
def create_chat_completion_data(
    content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
) -> Dict[str, Any]:
    """Return a chat-completion chunk dict in the OpenAI streaming format.

    Args:
        content: Text delta carried by this chunk.
        model: Model name echoed back to the client.
        timestamp: Unix timestamp (seconds) placed in the ``created`` field.
        finish_reason: ``None`` for intermediate chunks; the caller passes
            ``'stop'`` for the terminal chunk.

    Returns:
        A dict shaped like an OpenAI ``chat.completion.chunk`` object with a
        fresh ``chatcmpl-<uuid4>`` id and a single assistant-role choice.
    """
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": timestamp,
        "model": model,
        "choices": [
            {
                "index": 0,
                "delta": {"content": content, "role": "assistant"},
                "finish_reason": finish_reason,
            }
        ],
        "usage": None,
    }
46
 
47
# Function to convert a chat message into the upstream API's dict format.
def message_to_dict(message):
    """Convert a chat message object into the payload dict the API expects.

    ``message.content`` may be a plain string, or a two-element list of
    ``[{"text": ...}, {"image_url": {"url": ...}}]`` for image messages —
    assumed from the indexing below; TODO confirm against the ChatRequest
    model, which is not visible here.

    Returns:
        A dict with ``role`` and ``content`` keys; image messages also carry
        a ``data`` key with the base64 image URL and a snapshot title.
    """
    # Text lives either directly in content or in the first list element.
    content = message.content if isinstance(message.content, str) else message.content[0]["text"]
    if isinstance(message.content, list) and len(message.content) == 2 and "image_url" in message.content[1]:
        # Image message: attach the base64 data URL alongside the text.
        return {
            "role": message.role,
            "content": content,
            "data": {
                "imageBase64": message.content[1]["image_url"]["url"],
                "fileText": "",
                "title": "snapshot",
            },
        }
    return {"role": message.role, "content": content}
61
+
62
+ # Process streaming response
63
  async def process_streaming_response(request: ChatRequest):
64
  chat_id = generate_chat_id()
 
 
 
65
  agent_mode = AGENT_MODE.get(request.model, {})
66
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
67
 
68
+ headers = get_headers()
 
 
 
 
 
69
 
70
  json_data = {
71
  "agentMode": agent_mode,
 
85
  "previewToken": None,
86
  "trendingAgentMode": trending_agent_mode,
87
  "userId": None,
88
+ "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
89
  "userSystemPrompt": None,
90
  "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc",
91
  "visitFromDelta": False,
 
93
 
94
  async with httpx.AsyncClient() as client:
95
  try:
96
+ async with client.stream("POST", f"{BASE_URL}/api/chat", headers=headers, json=json_data, timeout=100) as response:
 
 
 
 
 
 
97
  response.raise_for_status()
98
  async for line in response.aiter_lines():
99
  timestamp = int(datetime.now().timestamp())
100
  if line:
101
+ yield f"data: {json.dumps(create_chat_completion_data(line, request.model, timestamp))}\n\n"
 
 
 
 
102
  yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
103
  yield "data: [DONE]\n\n"
104
  except httpx.HTTPStatusError as e:
 
108
  logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
109
  raise HTTPException(status_code=500, detail=str(e))
110
 
111
+ # Process non-streaming response
112
  async def process_non_streaming_response(request: ChatRequest):
113
  chat_id = generate_chat_id()
 
 
 
114
  agent_mode = AGENT_MODE.get(request.model, {})
115
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
116
 
117
+ headers = get_headers()
 
 
 
 
 
118
 
119
  json_data = {
120
  "agentMode": agent_mode,
 
134
  "previewToken": None,
135
  "trendingAgentMode": trending_agent_mode,
136
  "userId": None,
137
+ "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
138
  "userSystemPrompt": None,
139
  "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc",
140
  "visitFromDelta": False,
 
143
  full_response = ""
144
  async with httpx.AsyncClient() as client:
145
  try:
146
+ async with client.stream("POST", f"{BASE_URL}/api/chat", headers=headers, json=json_data) as response:
 
 
147
  response.raise_for_status()
148
  async for chunk in response.aiter_text():
149
  full_response += chunk
 
153
  except httpx.RequestError as e:
154
  logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
155
  raise HTTPException(status_code=500, detail=str(e))
 
 
156
 
157
  return {
158
  "id": f"chatcmpl-{uuid.uuid4()}",
 
167
  }
168
  ],
169
  "usage": None,
170
+ }