Niansuh commited on
Commit
23e063f
·
verified ·
1 Parent(s): f768e70

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +65 -177
api/utils.py CHANGED
@@ -1,181 +1,69 @@
1
- from datetime import datetime
2
- import json
3
- import uuid
4
- import asyncio
5
- import random
6
- import string
7
- from typing import Any, Dict, Optional
8
-
9
- import httpx
10
- from fastapi import HTTPException
11
- from api.config import (
12
- AGENT_MODE,
13
- TRENDING_AGENT_MODE,
14
- get_headers_api_chat,
15
- get_headers_chat,
16
- BASE_URL,
17
- )
18
- from api.models import ChatRequest
19
- from api.logger import setup_logger
20
-
21
- logger = setup_logger(__name__)
22
-
23
# Random identifier used as the per-conversation chat id.
def generate_chat_id(length: int = 7) -> str:
    """Return a random alphanumeric chat identifier of *length* characters."""
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choices(alphabet, k=length))
27
-
28
# Convert an incoming chat message object to the upstream wire format.
def message_to_dict(message):
    """Return the dict payload for *message*.

    Plain string content passes through unchanged; list content takes the
    text of its first element. A two-element list whose second entry has an
    "image_url" key is treated as an image upload and gains a "data" section
    carrying the base64 image URL.
    """
    raw = message.content
    content = raw if isinstance(raw, str) else raw[0]["text"]
    if isinstance(raw, list) and len(raw) == 2 and "image_url" in raw[1]:
        # Image messages carry the base64 payload alongside the text.
        return {
            "role": message.role,
            "content": content,
            "data": {
                "imageBase64": raw[1]["image_url"]["url"],
                "fileText": "",
                "title": "snapshot",
            },
        }
    return {"role": message.role, "content": content}
43
-
44
- # Removed strip_model_prefix function as per your requirement
45
-
46
# Function to get the correct referer URL
def get_referer_url(chat_id: str, model: str) -> str:
    """Return the referer URL for *model*, defaulting to ``BASE_URL``.

    ``chat_id`` is accepted for interface compatibility but is not used.

    NOTE(review): the ``MODEL_REFERERS`` import was removed from this
    module's import block, so referencing it directly raised ``NameError``
    on every call. We now look it up defensively and fall back to the bare
    ``BASE_URL`` when the mapping is absent or has no entry for *model*.
    """
    referers = globals().get("MODEL_REFERERS", {})
    suffix = referers.get(model)
    return f"{BASE_URL}{suffix}" if suffix is not None else BASE_URL
52
-
53
# Process streaming response with updated headers and data
async def process_streaming_response(request: ChatRequest):
    """Stream chat-completion chunks from the upstream API as SSE lines.

    Yields one ``data: {...}`` line per upstream line, then a final stop
    chunk and a ``data: [DONE]`` terminator.

    Raises:
        HTTPException: with the upstream status code on HTTP errors, or
            500 on transport/request failures.
    """
    chat_id = generate_chat_id()
    referer_url = get_referer_url(chat_id, request.model)
    logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")

    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})

    headers_api_chat = get_headers_api_chat(referer_url)

    json_data = {
        "messages": [message_to_dict(msg) for msg in request.messages],
        "id": chat_id,
        "previewToken": None,
        "userId": None,
        "codeModelMode": True,
        "agentMode": agent_mode,
        "trendingAgentMode": trending_agent_mode,
        "isMicMode": False,
        "userSystemPrompt": None,
        "maxTokens": request.max_tokens,
        "playgroundTopP": request.top_p,
        "playgroundTemperature": request.temperature,
        "isChromeExt": False,
        "githubToken": None,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "visitFromDelta": False,
        "mobileClient": False,
        "userSelectedModel": request.model if request.model in ["gpt-4o", "gemini-pro", "claude-sonnet-3.5", "blackboxai-pro"] else None,
        "webSearchMode": False,  # Adjust if needed
        "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc",
    }

    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                "POST",
                f"{BASE_URL}/api/chat",
                headers=headers_api_chat,
                json=json_data,
                timeout=100,
            ) as response:
                response.raise_for_status()
                # Bind timestamp up front: previously it was only assigned
                # inside the loop, so an empty upstream stream made the
                # final stop chunk raise NameError.
                timestamp = int(datetime.now().timestamp())
                async for line in response.aiter_lines():
                    timestamp = int(datetime.now().timestamp())
                    if line:
                        content = line.strip()
                        # NOTE(review): assumes create_chat_completion_data
                        # is defined elsewhere in this module — confirm it
                        # survived the refactor.
                        yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"

                yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
                yield "data: [DONE]\n\n"
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
            raise HTTPException(status_code=500, detail=str(e))
114
-
115
# Process non-streaming response with updated headers and data
async def process_non_streaming_response(request: ChatRequest):
    """Return a complete (non-streamed) chat completion for *request*.

    Streams the upstream response internally, concatenates the chunks, and
    wraps the text in an OpenAI-style ``chat.completion`` payload.

    Raises:
        HTTPException: with the upstream status code on HTTP errors, or
            500 on transport/request failures.
    """
    chat_id = generate_chat_id()
    referer_url = get_referer_url(chat_id, request.model)
    logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")

    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})

    headers_api_chat = get_headers_api_chat(referer_url)
    # NOTE(review): the previous revision also built chat-page headers via
    # get_headers_chat() with a fresh uuid here, but never used them; that
    # dead local has been removed.

    json_data = {
        "messages": [message_to_dict(msg) for msg in request.messages],
        "id": chat_id,
        "previewToken": None,
        "userId": None,
        "codeModelMode": True,
        "agentMode": agent_mode,
        "trendingAgentMode": trending_agent_mode,
        "isMicMode": False,
        "userSystemPrompt": None,
        "maxTokens": request.max_tokens,
        "playgroundTopP": request.top_p,
        "playgroundTemperature": request.temperature,
        "isChromeExt": False,
        "githubToken": None,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "visitFromDelta": False,
        "mobileClient": False,
        "userSelectedModel": request.model if request.model in ["gpt-4o", "gemini-pro", "claude-sonnet-3.5", "blackboxai-pro"] else None,
        "webSearchMode": False,  # Adjust if needed
        "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc",
    }

    full_response = ""
    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                method="POST", url=f"{BASE_URL}/api/chat", headers=headers_api_chat, json=json_data
            ) as response:
                response.raise_for_status()
                async for chunk in response.aiter_text():
                    # NOTE(review): stripping every chunk drops whitespace at
                    # chunk boundaries; confirm upstream never splits
                    # mid-sentence before relying on this.
                    full_response += chunk.strip()
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
            raise HTTPException(status_code=500, detail=str(e))

    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion",
        "created": int(datetime.now().timestamp()),
        "model": request.model,
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": full_response},
                "finish_reason": "stop",
            }
        ],
        "usage": None,
    }
 
 
1
class Editee:
    """Provider adapter for the editee.com chat endpoint.

    Exposes the model catalogue, alias resolution, and an async generator
    that submits a formatted prompt and yields the reply text.
    """

    label = "Editee"
    url = "https://editee.com"
    api_endpoint = "https://editee.com/submit/chatgptfree"
    working = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = 'claude'
    models = ['claude', 'gpt4', 'gemini', 'mistrallarge']

    # Public model names mapped to the provider's internal identifiers.
    model_aliases = {
        "claude-3.5-sonnet": "claude",
        "gpt-4o": "gpt4",
        "gemini-pro": "gemini",
        "mistral-large": "mistrallarge",
    }

    @classmethod
    def get_model(cls, model: str) -> str:
        """Resolve *model* to a provider model id, defaulting when unknown."""
        if model in cls.models:
            return model
        return cls.model_aliases.get(model, cls.default_model)

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        # Builtin generics (3.9+): typing.List/typing.Dict were referenced
        # without being imported, which raised NameError when this class
        # body executed.
        messages: list[dict[str, str]],
        proxy: str = None,
        **kwargs
    ):
        """POST the formatted conversation to the API and yield the reply.

        NOTE(review): relies on ``ClientSession`` (aiohttp) and
        ``format_prompt`` being imported elsewhere in this module — confirm
        those imports exist in the full file.
        """
        model = cls.get_model(model)

        # Browser-like headers; the endpoint expects an XHR-style request.
        headers = {
            "Accept": "application/json, text/plain, */*",
            "Accept-Language": "en-US,en;q=0.9",
            "Cache-Control": "no-cache",
            "Content-Type": "application/json",
            "Origin": cls.url,
            "Pragma": "no-cache",
            "Priority": "u=1, i",
            "Referer": f"{cls.url}/chat-gpt",
            "Sec-CH-UA": '"Chromium";v="129", "Not=A?Brand";v="8"',
            "Sec-CH-UA-Mobile": '?0',
            "Sec-CH-UA-Platform": '"Linux"',
            "Sec-Fetch-Dest": 'empty',
            "Sec-Fetch-Mode": 'cors',
            "Sec-Fetch-Site": 'same-origin',
            "User-Agent": 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36',
            "X-Requested-With": 'XMLHttpRequest',
        }

        async with ClientSession(headers=headers) as session:
            prompt = format_prompt(messages)
            data = {
                "user_input": prompt,
                "context": " ",
                "template_id": "",
                "selected_model": model
            }
            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                response.raise_for_status()
                response_data = await response.json()
                yield response_data['text']