Niansuh committed on
Commit
21339c1
·
verified ·
1 Parent(s): 8a8e797

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +86 -84
api/utils.py CHANGED
@@ -3,12 +3,14 @@ import json
3
  from typing import Any, Dict, Optional
4
 
5
  import httpx
 
6
  from api.config import (
 
 
7
  MODEL_MAPPING,
8
- headers,
9
  AGENT_MODE,
10
  TRENDING_AGENT_MODE,
11
- BASE_URL,
12
  MODEL_PREFIXES,
13
  MODEL_REFERERS
14
  )
@@ -18,30 +20,38 @@ from api.models import ChatRequest
18
  from api.logger import setup_logger
19
 
20
  import uuid
21
- import asyncio
22
- import random # Newly added imports
23
 
24
  logger = setup_logger(__name__)
25
 
26
- def create_chat_completion_data(
27
- content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
28
- ) -> Dict[str, Any]:
29
- return {
30
- "id": f"chatcmpl-{uuid.uuid4()}",
31
- "object": "chat.completion.chunk",
32
- "created": timestamp,
33
- "model": model,
34
- "choices": [
35
- {
36
- "index": 0,
37
- "delta": {"content": content, "role": "assistant"},
38
- "finish_reason": finish_reason,
39
- }
40
- ],
41
- "usage": None,
42
  }
43
 
44
- def message_to_dict(message, model_prefix: Optional[str] = None):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45
  if isinstance(message.content, str):
46
  content = message.content
47
  if model_prefix:
@@ -71,50 +81,35 @@ def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
71
  logger.debug("No prefix to strip from content.")
72
  return content
73
 
74
- async def process_streaming_response(request: ChatRequest):
75
  agent_mode = AGENT_MODE.get(request.model, {})
76
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
77
  model_prefix = MODEL_PREFIXES.get(request.model, "")
78
  referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
79
- referer_url = f"{BASE_URL}{referer_path}"
80
 
81
  # Update headers with dynamic Referer
82
- dynamic_headers = headers.copy()
83
- dynamic_headers['Referer'] = referer_url
 
 
 
 
84
 
85
- # Introduce delay for 'o1-preview' model
86
- if request.model == 'o1-preview':
87
- delay_seconds = random.randint(20, 60)
88
- logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview'")
89
- await asyncio.sleep(delay_seconds)
90
 
91
  json_data = {
92
- "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
93
- "previewToken": None,
94
- "userId": None,
95
- "codeModelMode": True,
96
- "agentMode": agent_mode,
97
- "trendingAgentMode": trending_agent_mode,
98
- "isMicMode": False,
99
- "userSystemPrompt": None,
100
- "maxTokens": request.max_tokens,
101
- "playgroundTopP": request.top_p,
102
- "playgroundTemperature": request.temperature,
103
- "isChromeExt": False,
104
- "githubToken": None,
105
- "clickedAnswer2": False,
106
- "clickedAnswer3": False,
107
- "clickedForceWebSearch": False,
108
- "visitFromDelta": False,
109
- "mobileClient": False,
110
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
111
  }
112
 
113
  async with httpx.AsyncClient() as client:
114
  try:
115
  async with client.stream(
116
  "POST",
117
- f"{BASE_URL}/api/chat",
118
  headers=dynamic_headers,
119
  json=json_data,
120
  timeout=100,
@@ -139,59 +134,48 @@ async def process_streaming_response(request: ChatRequest):
139
  logger.error(f"Error occurred during request: {e}")
140
  raise HTTPException(status_code=500, detail=str(e))
141
 
142
- async def process_non_streaming_response(request: ChatRequest):
143
  agent_mode = AGENT_MODE.get(request.model, {})
144
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
145
  model_prefix = MODEL_PREFIXES.get(request.model, "")
146
  referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
147
- referer_url = f"{BASE_URL}{referer_path}"
148
 
149
  # Update headers with dynamic Referer
150
- dynamic_headers = headers.copy()
151
- dynamic_headers['Referer'] = referer_url
 
 
 
 
152
 
153
- # Introduce delay for 'o1-preview' model
154
- if request.model == 'o1-preview':
155
- delay_seconds = random.randint(20, 60)
156
- logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview'")
157
- await asyncio.sleep(delay_seconds)
158
 
159
  json_data = {
160
- "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
161
- "previewToken": None,
162
- "userId": None,
163
- "codeModelMode": True,
164
- "agentMode": agent_mode,
165
- "trendingAgentMode": trending_agent_mode,
166
- "isMicMode": False,
167
- "userSystemPrompt": None,
168
- "maxTokens": request.max_tokens,
169
- "playgroundTopP": request.top_p,
170
- "playgroundTemperature": request.temperature,
171
- "isChromeExt": False,
172
- "githubToken": None,
173
- "clickedAnswer2": False,
174
- "clickedAnswer3": False,
175
- "clickedForceWebSearch": False,
176
- "visitFromDelta": False,
177
- "mobileClient": False,
178
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
179
  }
 
180
  full_response = ""
181
  async with httpx.AsyncClient() as client:
182
  try:
183
- async with client.stream(
184
- method="POST", url=f"{BASE_URL}/api/chat", headers=dynamic_headers, json=json_data
185
- ) as response:
186
- response.raise_for_status()
187
- async for chunk in response.aiter_text():
188
- full_response += chunk
 
 
189
  except httpx.HTTPStatusError as e:
190
  logger.error(f"HTTP error occurred: {e}")
191
  raise HTTPException(status_code=e.response.status_code, detail=str(e))
192
  except httpx.RequestError as e:
193
  logger.error(f"Error occurred during request: {e}")
194
  raise HTTPException(status_code=500, detail=str(e))
 
195
  if full_response.startswith("$@$v=undefined-rv1$@$"):
196
  full_response = full_response[21:]
197
 
@@ -212,3 +196,21 @@ async def process_non_streaming_response(request: ChatRequest):
212
  ],
213
  "usage": None,
214
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  from typing import Any, Dict, Optional
4
 
5
  import httpx
6
+ import aiohttp # Add this import
7
  from api.config import (
8
+ DDG_API_ENDPOINT, # Updated
9
+ DDG_STATUS_URL, # Updated
10
  MODEL_MAPPING,
11
+ ALLOWED_MODELS,
12
  AGENT_MODE,
13
  TRENDING_AGENT_MODE,
 
14
  MODEL_PREFIXES,
15
  MODEL_REFERERS
16
  )
 
20
  from api.logger import setup_logger
21
 
22
  import uuid
 
 
23
 
24
  logger = setup_logger(__name__)
25
 
26
+ # Remove Blackbox-specific functions and add DDG-specific functions
27
+
28
+ # New DDG Integration
29
+ async def get_ddg_vqd():
30
+ status_url = DDG_STATUS_URL
31
+
32
+ headers = {
33
+ 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36',
34
+ 'Accept': 'text/event-stream',
35
+ 'x-vqd-accept': '1'
 
 
 
 
 
 
36
  }
37
 
38
+ async with aiohttp.ClientSession() as session:
39
+ try:
40
+ async with session.get(status_url, headers=headers) as response:
41
+ if response.status == 200:
42
+ return response.headers.get("x-vqd-4")
43
+ else:
44
+ logger.error(f"Error: Status code {response.status} when fetching VQD")
45
+ return None
46
+ except Exception as e:
47
+ logger.error(f"Error getting VQD: {e}")
48
+ return None
49
+
50
+ def format_prompt(messages):
51
+ """Helper function to format messages into a prompt string."""
52
+ return "\n".join([f"{msg.role}: {msg.content}" for msg in messages])
53
+
54
+ def message_to_dict_ddg(message, model_prefix: Optional[str] = None):
55
  if isinstance(message.content, str):
56
  content = message.content
57
  if model_prefix:
 
81
  logger.debug("No prefix to strip from content.")
82
  return content
83
 
84
+ async def process_ddg_streaming_response(request: ChatRequest):
85
  agent_mode = AGENT_MODE.get(request.model, {})
86
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
87
  model_prefix = MODEL_PREFIXES.get(request.model, "")
88
  referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
89
+ referer_url = f"https://duckduckgo.com{referer_path}" # Updated BASE_URL
90
 
91
  # Update headers with dynamic Referer
92
+ dynamic_headers = {
93
+ 'accept': 'text/event-stream',
94
+ 'content-type': 'application/json',
95
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36',
96
+ 'x-vqd-4': await get_ddg_vqd()
97
+ }
98
 
99
+ if not dynamic_headers['x-vqd-4']:
100
+ raise HTTPException(status_code=500, detail="Failed to obtain VQD token")
 
 
 
101
 
102
  json_data = {
103
+ "model": MODEL_MAPPING.get(request.model, request.model),
104
+ "messages": [message_to_dict_ddg(msg, model_prefix=model_prefix) for msg in request.messages],
105
+ # Add additional fields if required by DDG API
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
106
  }
107
 
108
  async with httpx.AsyncClient() as client:
109
  try:
110
  async with client.stream(
111
  "POST",
112
+ DDG_API_ENDPOINT,
113
  headers=dynamic_headers,
114
  json=json_data,
115
  timeout=100,
 
134
  logger.error(f"Error occurred during request: {e}")
135
  raise HTTPException(status_code=500, detail=str(e))
136
 
137
+ async def process_ddg_non_streaming_response(request: ChatRequest):
138
  agent_mode = AGENT_MODE.get(request.model, {})
139
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
140
  model_prefix = MODEL_PREFIXES.get(request.model, "")
141
  referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
142
+ referer_url = f"https://duckduckgo.com{referer_path}" # Updated BASE_URL
143
 
144
  # Update headers with dynamic Referer
145
+ dynamic_headers = {
146
+ 'accept': 'application/json',
147
+ 'content-type': 'application/json',
148
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36',
149
+ 'x-vqd-4': await get_ddg_vqd()
150
+ }
151
 
152
+ if not dynamic_headers['x-vqd-4']:
153
+ raise HTTPException(status_code=500, detail="Failed to obtain VQD token")
 
 
 
154
 
155
  json_data = {
156
+ "model": MODEL_MAPPING.get(request.model, request.model),
157
+ "messages": [message_to_dict_ddg(msg, model_prefix=model_prefix) for msg in request.messages],
158
+ # Add additional fields if required by DDG API
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
159
  }
160
+
161
  full_response = ""
162
  async with httpx.AsyncClient() as client:
163
  try:
164
+ response = await client.post(
165
+ DDG_API_ENDPOINT,
166
+ headers=dynamic_headers,
167
+ json=json_data,
168
+ timeout=100
169
+ )
170
+ response.raise_for_status()
171
+ full_response = response.text
172
  except httpx.HTTPStatusError as e:
173
  logger.error(f"HTTP error occurred: {e}")
174
  raise HTTPException(status_code=e.response.status_code, detail=str(e))
175
  except httpx.RequestError as e:
176
  logger.error(f"Error occurred during request: {e}")
177
  raise HTTPException(status_code=500, detail=str(e))
178
+
179
  if full_response.startswith("$@$v=undefined-rv1$@$"):
180
  full_response = full_response[21:]
181
 
 
196
  ],
197
  "usage": None,
198
  }
199
+
200
+ def create_chat_completion_data(
201
+ content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
202
+ ) -> Dict[str, Any]:
203
+ return {
204
+ "id": f"chatcmpl-{uuid.uuid4()}",
205
+ "object": "chat.completion.chunk",
206
+ "created": timestamp,
207
+ "model": model,
208
+ "choices": [
209
+ {
210
+ "index": 0,
211
+ "delta": {"content": content, "role": "assistant"},
212
+ "finish_reason": finish_reason,
213
+ }
214
+ ],
215
+ "usage": None,
216
+ }