Update api/utils.py

api/utils.py (+57 -114)

This commit drops the Editee integration (the EDITE_API_ENDPOINT constant, the httpx request code, and the commented-out strip_model_prefix / get_referer_url helpers) and routes both the streaming and non-streaming paths through the new GizAI provider. It also fixes resolve_model, which previously appears to have returned None for unrecognized models and now falls back to GizAI.default_model.
```diff
@@ -18,12 +18,10 @@ from api.config import (
 )
 from api.models import ChatRequest, Message
 from api.logger import setup_logger
+from api.providers.gizai import GizAI  # Import the GizAI provider
 
 logger = setup_logger(__name__)
 
-# Editee API endpoint
-EDITE_API_ENDPOINT = "https://editee.com/submit/chatgptfree"
-
 # Helper function to create a random alphanumeric chat ID
 def generate_chat_id(length: int = 7) -> str:
     characters = string.ascii_letters + string.digits
```
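Note: the new code leans on a GizAI provider whose source is not part of this diff. The sketch below captures the interface api/utils.py now expects: a `default_model` attribute, an instance method `create_async_generator`, and an `ImageResponse` type with `images` and `alt` fields. The method body and the default-model value are assumptions, not the provider's real code. Also worth flagging: `ImageResponse` is referenced by the new code in this file, but the diff never imports it, so it presumably needs to join the import line above.

```python
# Hypothetical sketch of the interface api/utils.py now depends on.
# The real api/providers/gizai.py is not shown in this diff and may differ.
from dataclasses import dataclass
from typing import AsyncGenerator, List, Optional, Union

@dataclass
class ImageResponse:
    images: List[str]  # one or more image URLs
    alt: str           # alt text describing the images

class GizAI:
    # Assumption: the provider's actual default model may differ.
    default_model = "chat-gemini-flash"

    async def create_async_generator(
        self,
        model: str,
        messages: list,
        proxy: Optional[str] = None,
    ) -> AsyncGenerator[Union[str, ImageResponse], None]:
        # Implementation elided: call the GizAI backend and yield text chunks,
        # or a single ImageResponse for image models.
        yield "..."  # placeholder so this stub is a valid async generator
```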
```diff
@@ -70,21 +68,6 @@ def message_to_dict(message: Message):
     }
     return {"role": message.role, "content": content}
 
-# Function to strip model prefix from content if present (Removed as MODEL_PREFIXES is removed)
-# def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
-#     """Remove the model prefix from the response content if present."""
-#     if model_prefix and content.startswith(model_prefix):
-#         logger.debug(f"Stripping prefix '{model_prefix}' from content.")
-#         return content[len(model_prefix):].strip()
-#     return content
-
-# Function to get the correct referer URL for logging (Removed as MODEL_REFERERS is removed)
-# def get_referer_url(chat_id: str, model: str) -> str:
-#     """Generate the referer URL based on specific models listed in MODEL_REFERERS."""
-#     if model in MODEL_REFERERS:
-#         return f"{BASE_URL}/chat/{chat_id}?model={model}"
-#     return BASE_URL
-
 # Function to resolve model aliases
 def resolve_model(model: str) -> str:
     if model in MODEL_MAPPING:
```
```diff
@@ -92,118 +75,78 @@ def resolve_model(model: str) -> str:
     elif model in model_aliases:
         return model_aliases[model]
     else:
-        logger.warning(f"Model '{model}' not recognized. Using default model '{default_model}'.")
-        return
+        logger.warning(f"Model '{model}' not recognized. Using default model '{GizAI.default_model}'.")
+        return GizAI.default_model
 
-# Process streaming response with
+# Process streaming response with GizAI provider
 async def process_streaming_response(request: ChatRequest) -> AsyncGenerator[str, None]:
     chat_id = generate_chat_id()
     resolved_model = resolve_model(request.model)
-    # referer_url = get_referer_url(chat_id, resolved_model)  # Removed
     logger.info(f"Generated Chat ID: {chat_id} - Model: {resolved_model}")
 
 [... removed old lines 105-127, presumably the Editee payload construction and streaming POST, did not survive the page extraction ...]
-        response_data = response.json()
-        # Assuming response_data contains 'text' field
-        text = response_data.get('text', '')
-        timestamp = int(datetime.now().timestamp())
-        if text:
-            # cleaned_content = strip_model_prefix(text, model_prefix)  # Removed
-            yield f"data: {json.dumps(create_chat_completion_data(text, resolved_model, timestamp))}\n\n"
-
-        # Indicate completion
-        yield f"data: {json.dumps(create_chat_completion_data('', resolved_model, timestamp, 'stop'))}\n\n"
-        yield "data: [DONE]\n\n"
-    except httpx.HTTPStatusError as e:
-        logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e.response.status_code} - {e.response.text}")
-        raise HTTPException(status_code=e.response.status_code, detail=str(e))
-    except httpx.RequestError as e:
-        logger.error(f"Request error occurred for Chat ID {chat_id}: {e}")
-        raise HTTPException(status_code=500, detail=str(e))
+    # Instantiate the GizAI provider
+    gizai_provider = GizAI()
+
+    # Create the async generator
+    async for response in gizai_provider.create_async_generator(
+        model=resolved_model,
+        messages=request.messages,
+        proxy=request.proxy  # Assuming 'proxy' is part of ChatRequest; if not, adjust accordingly
+    ):
+        timestamp = int(datetime.now().timestamp())
+        if isinstance(response, ImageResponse):
+            # Handle image responses
+            yield f"data: {json.dumps({'image_url': response.images, 'alt': response.alt})}\n\n"
+        else:
+            # Handle text responses
+            yield f"data: {json.dumps(create_chat_completion_data(response, resolved_model, timestamp))}\n\n"
+
+    # Indicate completion
+    timestamp = int(datetime.now().timestamp())
+    yield f"data: {json.dumps(create_chat_completion_data('', resolved_model, timestamp, 'stop'))}\n\n"
+    yield "data: [DONE]\n\n"
 
-# Process non-streaming response with headers from config.py
+# Process non-streaming response with GizAI provider
 async def process_non_streaming_response(request: ChatRequest) -> Dict[str, Any]:
     chat_id = generate_chat_id()
     resolved_model = resolve_model(request.model)
-    # referer_url = get_referer_url(chat_id, resolved_model)  # Removed
     logger.info(f"Generated Chat ID: {chat_id} - Model: {resolved_model}")
 
 [... removed old lines 153-166, presumably the request payload construction, did not survive the page extraction ...]
-    async with httpx.AsyncClient() as client:
-        try:
-            response = await client.post(
-                EDITE_API_ENDPOINT,
-                headers=headers_api_chat,
-                json=data,
-                timeout=100,
-            )
-            response.raise_for_status()
-            response_data = response.json()
-            text = response_data.get('text', '')
-            # if text.startswith("$@$v=undefined-rv1$@$"):
-            #     text = text[21:]  # Removed
-
-            # cleaned_full_response = strip_model_prefix(text, model_prefix)  # Removed
-
-            return {
-                "id": f"chatcmpl-{uuid.uuid4()}",
-                "object": "chat.completion",
-                "created": int(datetime.now().timestamp()),
-                "model": resolved_model,
-                "choices": [
-                    {
-                        "index": 0,
-                        "message": {"role": "assistant", "content": text},
-                        "finish_reason": "stop",
-                    }
-                ],
-                "usage": None,
-            }
-        except httpx.RequestError as e:
-            logger.error(f"Request error occurred for Chat ID {chat_id}: {e}")
-            raise HTTPException(status_code=500, detail=str(e))
+    # Instantiate the GizAI provider
+    gizai_provider = GizAI()
+
+    # Collect the responses
+    responses = []
+    async for response in gizai_provider.create_async_generator(
+        model=resolved_model,
+        messages=request.messages,
+        proxy=request.proxy  # Assuming 'proxy' is part of ChatRequest; if not, adjust accordingly
+    ):
+        if isinstance(response, ImageResponse):
+            # For image responses, collect image URLs
+            responses.append({"image_url": response.images, "alt": response.alt})
+        else:
+            # For text responses, append the text
+            responses.append(response)
+
+    return {
+        "id": f"chatcmpl-{uuid.uuid4()}",
+        "object": "chat.completion",
+        "created": int(datetime.now().timestamp()),
+        "model": resolved_model,
+        "choices": [
+            {
+                "index": 0,
+                "message": {"role": "assistant", "content": responses},
+                "finish_reason": "stop",
+            }
+        ],
+        "usage": None,
+    }
 
 # Helper function to format prompt from messages
 def format_prompt(messages: list[Message]) -> str:
-    # Implement the prompt formatting as per
+    # Implement the prompt formatting as per GizAI's requirements
     # Placeholder implementation
     formatted_messages = []
     for msg in messages:
```
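A few notes on the surrounding code. Both paths serialize chunks through create_chat_completion_data, which this diff leaves untouched and does not show. For readers without the rest of the file, the helper presumably builds OpenAI-style chat.completion.chunk payloads along these lines (an assumption about its shape, not the file's actual definition):

```python
# Assumed shape of the create_chat_completion_data helper referenced above;
# the real definition lives elsewhere in api/utils.py and may differ.
import uuid
from typing import Any, Dict, Optional

def create_chat_completion_data(
    content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
) -> Dict[str, Any]:
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": timestamp,
        "model": model,
        "choices": [
            {
                "index": 0,
                "delta": {"content": content, "role": "assistant"},
                "finish_reason": finish_reason,
            }
        ],
        "usage": None,
    }
```

With a helper of this shape, each yielded text chunk becomes one `data: {...}` SSE event, and the final chunk with finish_reason "stop" plus the `data: [DONE]` terminator matches what OpenAI-compatible streaming clients expect.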
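For context on how these helpers get used: a route layer (not touched by this diff) typically picks between the two paths based on the request's stream flag. The wiring below is hypothetical; the project's real routes live outside api/utils.py, and the stream field on ChatRequest is an assumption:

```python
# Hypothetical FastAPI wiring; the project's actual route module is not part
# of this diff, and ChatRequest.stream is an assumed field.
from fastapi import FastAPI
from fastapi.responses import StreamingResponse

from api.models import ChatRequest
from api.utils import process_non_streaming_response, process_streaming_response

app = FastAPI()

@app.post("/v1/chat/completions")
async def chat_completions(request: ChatRequest):
    if getattr(request, "stream", False):
        # Stream SSE events produced by process_streaming_response.
        return StreamingResponse(
            process_streaming_response(request), media_type="text/event-stream"
        )
    # Otherwise return the fully collected chat.completion payload.
    return await process_non_streaming_response(request)
```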