AIMaster7 committed on
Commit
c18e9c8
·
verified ·
1 Parent(s): 4f72e24

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +96 -205
main.py CHANGED
@@ -5,6 +5,7 @@ import secrets
5
  import string
6
  import time
7
  import tempfile
 
8
  from typing import List, Optional, Union, Any
9
 
10
  import httpx
@@ -18,8 +19,6 @@ from gradio_client import Client, handle_file
18
 
19
  # --- Configuration ---
20
  load_dotenv()
21
-
22
- # Env variables for external services
23
  IMAGE_API_URL = os.environ.get("IMAGE_API_URL", "https://image.api.example.com")
24
  SNAPZION_UPLOAD_URL = "https://upload.snapzion.com/api/public-upload"
25
  SNAPZION_API_KEY = os.environ.get("SNAP", "")
@@ -42,9 +41,8 @@ MODEL_ALIASES = {}
42
  app = FastAPI(
43
  title="OpenAI Compatible API",
44
  description="An adapter for various services to be compatible with the OpenAI API specification.",
45
- version="1.1.1" # Incremented version for the fix
46
  )
47
-
48
  try:
49
  ocr_client = Client("multimodalart/Florence-2-l4")
50
  except Exception as e:
@@ -56,28 +54,23 @@ except Exception as e:
56
  class Message(BaseModel):
57
  role: str
58
  content: str
59
-
60
  class ChatRequest(BaseModel):
61
  messages: List[Message]
62
  model: str
63
  stream: Optional[bool] = False
64
  tools: Optional[Any] = None
65
-
66
  class ImageGenerationRequest(BaseModel):
67
  prompt: str
68
  aspect_ratio: Optional[str] = "1:1"
69
  n: Optional[int] = 1
70
  user: Optional[str] = None
71
  model: Optional[str] = "default"
72
-
73
  class ModerationRequest(BaseModel):
74
  input: Union[str, List[str]]
75
  model: Optional[str] = "text-moderation-stable"
76
-
77
  class OcrRequest(BaseModel):
78
  image_url: Optional[str] = Field(None, description="URL of the image to process.")
79
  image_b64: Optional[str] = Field(None, description="Base64 encoded string of the image to process.")
80
-
81
  @model_validator(mode='before')
82
  @classmethod
83
  def check_sources(cls, data: Any) -> Any:
@@ -87,7 +80,6 @@ class OcrRequest(BaseModel):
87
  if data.get('image_url') and data.get('image_b64'):
88
  raise ValueError('Provide either image_url or image_b64, not both.')
89
  return data
90
-
91
  class OcrResponse(BaseModel):
92
  ocr_text: str
93
  raw_response: dict
@@ -102,192 +94,102 @@ def generate_random_id(prefix: str, length: int = 29) -> str:
102
 
103
  @app.get("/v1/models", tags=["Models"])
104
  async def list_models():
105
- """Lists the available models."""
106
  return {"object": "list", "data": AVAILABLE_MODELS}
107
 
108
- # (Chat, Image Generation, and Moderation endpoints are unchanged)
109
  @app.post("/v1/chat/completions", tags=["Chat"])
110
  async def chat_completion(request: ChatRequest):
111
- """Handles chat completion requests, supporting streaming and non-streaming."""
112
- model_id = MODEL_ALIASES.get(request.model, request.model)
113
- chat_id = generate_random_id("chatcmpl-")
114
- headers = {
115
- 'accept': 'text/event-stream',
116
- 'content-type': 'application/json',
117
- 'origin': 'https://www.chatwithmono.xyz',
118
- 'referer': 'https://www.chatwithmono.xyz/',
119
- 'user-agent': 'Mozilla/5.0',
120
- }
121
-
122
  if request.tools:
123
- tool_prompt = f"""You have access to the following tools. To call a tool, please respond with JSON for a tool call within <tool_call></tool_call> XML tags. Respond in the format {{"name": tool name, "parameters": dictionary of argument name and its value}}. Do not use variables.
124
  Tools: {";".join(f"<tool>{tool}</tool>" for tool in request.tools)}
125
  Response Format for tool call:
126
  <tool_call>
127
  {{"name": <function-name>, "arguments": <args-json-object>}}
128
  </tool_call>"""
129
- if request.messages[0].role == "system":
130
- request.messages[0].content += "\n\n" + tool_prompt
131
- else:
132
- request.messages.insert(0, Message(role="system", content=tool_prompt))
133
-
134
- payload = {"messages": [msg.model_dump() for msg in request.messages], "model": model_id}
135
-
136
  if request.stream:
137
  async def event_stream():
138
- created = int(time.time())
139
- usage_info = None
140
- is_first_chunk = True
141
- tool_call_buffer = ""
142
- in_tool_call = False
143
-
144
  try:
145
- async with httpx.AsyncClient(timeout=120) as client:
146
- async with client.stream("POST", CHAT_API_URL, headers=headers, json=payload) as response:
147
  response.raise_for_status()
148
  async for line in response.aiter_lines():
149
- if not line: continue
150
  if line.startswith("0:"):
151
- try:
152
- content_piece = json.loads(line[2:])
153
- except json.JSONDecodeError:
154
- continue
155
-
156
- current_buffer = content_piece
157
- if in_tool_call:
158
- current_buffer = tool_call_buffer + content_piece
159
-
160
- if "</tool_call>" in current_buffer:
161
- tool_str = current_buffer.split("<tool_call>")[1].split("</tool_call>")[0]
162
- tool_json = json.loads(tool_str.strip())
163
- delta = {
164
- "content": None,
165
- "tool_calls": [{"index": 0, "id": generate_random_id("call_"), "type": "function",
166
- "function": {"name": tool_json["name"], "arguments": json.dumps(tool_json["parameters"])}}]
167
- }
168
- chunk = {"id": chat_id, "object": "chat.completion.chunk", "created": created, "model": model_id,
169
- "choices": [{"index": 0, "delta": delta, "finish_reason": None}], "usage": None}
170
- yield f"data: {json.dumps(chunk)}\n\n"
171
-
172
- in_tool_call = False
173
- tool_call_buffer = ""
174
- remaining_text = current_buffer.split("</tool_call>", 1)[1]
175
- if remaining_text:
176
- content_piece = remaining_text
177
- else:
178
- continue
179
-
180
- if "<tool_call>" in content_piece:
181
- in_tool_call = True
182
- tool_call_buffer += content_piece.split("<tool_call>", 1)[1]
183
- text_before = content_piece.split("<tool_call>", 1)[0]
184
  if text_before:
185
- delta = {"content": text_before, "tool_calls": None}
186
- chunk = {"id": chat_id, "object": "chat.completion.chunk", "created": created, "model": model_id,
187
- "choices": [{"index": 0, "delta": delta, "finish_reason": None}], "usage": None}
188
- yield f"data: {json.dumps(chunk)}\n\n"
189
- if "</tool_call>" not in tool_call_buffer:
190
- continue
191
-
192
  if not in_tool_call:
193
- delta = {"content": content_piece}
194
- if is_first_chunk:
195
- delta["role"] = "assistant"
196
- is_first_chunk = False
197
- chunk = {"id": chat_id, "object": "chat.completion.chunk", "created": created, "model": model_id,
198
- "choices": [{"index": 0, "delta": delta, "finish_reason": None}], "usage": None}
199
- yield f"data: {json.dumps(chunk)}\n\n"
200
-
201
- elif line.startswith(("e:", "d:")):
202
- try:
203
- usage_info = json.loads(line[2:]).get("usage")
204
- except (json.JSONDecodeError, AttributeError): pass
205
  break
206
-
207
- final_usage = None
208
- if usage_info:
209
- final_usage = {"prompt_tokens": usage_info.get("promptTokens", 0), "completion_tokens": usage_info.get("completionTokens", 0), "total_tokens": usage_info.get("promptTokens", 0) + usage_info.get("completionTokens", 0)}
210
- done_chunk = {"id": chat_id, "object": "chat.completion.chunk", "created": created, "model": model_id,
211
- "choices": [{"index": 0, "delta": {}, "finish_reason": "stop" if not in_tool_call else "tool_calls"}], "usage": final_usage}
212
- yield f"data: {json.dumps(done_chunk)}\n\n"
213
-
214
- except httpx.HTTPStatusError as e:
215
- error_content = {"error": {"message": f"Upstream API error: {e.response.status_code}. Details: {e.response.text}", "type": "upstream_error", "code": str(e.response.status_code)}}
216
- yield f"data: {json.dumps(error_content)}\n\n"
217
- finally:
218
- yield "data: [DONE]\n\n"
219
-
220
- return StreamingResponse(event_stream(), media_type="text/event-stream")
221
  else:
222
- full_response, usage_info = "", {}
223
  try:
224
- async with httpx.AsyncClient(timeout=120) as client:
225
- async with client.stream("POST", CHAT_API_URL, headers=headers, json=payload) as response:
226
  response.raise_for_status()
227
  async for chunk in response.aiter_lines():
228
  if chunk.startswith("0:"):
229
- try: full_response += json.loads(chunk[2:])
230
- except: continue
231
- elif chunk.startswith(("e:", "d:")):
232
- try: usage_info = json.loads(chunk[2:]).get("usage", {})
233
- except: continue
234
-
235
- tool_calls = None
236
- content_response = full_response
237
- if "<tool_call>" in full_response and "</tool_call>" in full_response:
238
- tool_call_str = full_response.split("<tool_call>")[1].split("</tool_call>")[0]
239
- tool_call = json.loads(tool_call_str.strip())
240
- tool_calls = [{"id": generate_random_id("call_"), "type": "function", "function": {"name": tool_call["name"], "arguments": json.dumps(tool_call["parameters"])}}]
241
- content_response = None
242
-
243
- return JSONResponse(content={
244
- "id": chat_id, "object": "chat.completion", "created": int(time.time()), "model": model_id,
245
- "choices": [{"index": 0, "message": {"role": "assistant", "content": content_response, "tool_calls": tool_calls}, "finish_reason": "stop" if not tool_calls else "tool_calls"}],
246
- "usage": {"prompt_tokens": usage_info.get("promptTokens", 0), "completion_tokens": usage_info.get("completionTokens", 0), "total_tokens": usage_info.get("promptTokens", 0) + usage_info.get("completionTokens", 0)}
247
- })
248
- except httpx.HTTPStatusError as e:
249
- return JSONResponse(status_code=e.response.status_code, content={"error": {"message": f"Upstream API error. Details: {e.response.text}", "type": "upstream_error"}})
250
 
251
  @app.post("/v1/images/generations", tags=["Images"])
252
  async def generate_images(request: ImageGenerationRequest):
253
- """Handles image generation requests."""
254
- results = []
255
  try:
256
- async with httpx.AsyncClient(timeout=120) as client:
257
  for _ in range(request.n):
258
- model = request.model or "default"
259
- if model in ["gpt-image-1", "dall-e-3", "dall-e-2", "nextlm-image-1"]:
260
- headers = {'Content-Type': 'application/json', 'User-Agent': 'Mozilla/5.0', 'Referer': 'https://www.chatwithmono.xyz/'}
261
- payload = {"prompt": request.prompt, "model": model}
262
- resp = await client.post(IMAGE_GEN_API_URL, headers=headers, json=payload)
263
- resp.raise_for_status()
264
- data = resp.json()
265
- b64_image = data.get("image")
266
- if not b64_image: return JSONResponse(status_code=502, content={"error": "Missing base64 image in response"})
267
-
268
- image_url = f"data:image/png;base64,{b64_image}"
269
  if SNAPZION_API_KEY:
270
- upload_headers = {"Authorization": SNAPZION_API_KEY}
271
- upload_files = {'file': ('image.png', base64.b64decode(b64_image), 'image/png')}
272
- upload_resp = await client.post(SNAPZION_UPLOAD_URL, headers=upload_headers, files=upload_files)
273
- if upload_resp.status_code == 200:
274
- image_url = upload_resp.json().get("url", image_url)
275
-
276
- results.append({"url": image_url, "b64_json": b64_image, "revised_prompt": data.get("revised_prompt")})
277
- else:
278
- params = {"prompt": request.prompt, "aspect_ratio": request.aspect_ratio, "link": "typegpt.net"}
279
- resp = await client.get(IMAGE_API_URL, params=params)
280
- resp.raise_for_status()
281
- data = resp.json()
282
- results.append({"url": data.get("image_link"), "b64_json": data.get("base64_output")})
283
- except httpx.HTTPStatusError as e:
284
- return JSONResponse(status_code=502, content={"error": f"Image generation failed. Upstream error: {e.response.status_code}", "details": e.response.text})
285
- except Exception as e:
286
- return JSONResponse(status_code=500, content={"error": "An internal error occurred.", "details": str(e)})
287
- return {"created": int(time.time()), "data": results}
288
 
289
 
290
- # === FIXED OCR Endpoint ===
291
  @app.post("/v1/ocr", response_model=OcrResponse, tags=["OCR"])
292
  async def perform_ocr(request: OcrRequest):
293
  """
@@ -316,29 +218,37 @@ async def perform_ocr(request: OcrRequest):
316
  raw_output = prediction[0]
317
  raw_result_dict = {}
318
 
319
- # --- START: FIX ---
320
- # The Gradio client returns a JSON string, not a dict. We must parse it.
321
  if isinstance(raw_output, str):
322
  try:
 
323
  raw_result_dict = json.loads(raw_output)
324
  except json.JSONDecodeError:
325
- raise HTTPException(status_code=502, detail="Failed to parse JSON response from OCR service.")
 
 
 
 
 
 
 
 
 
 
326
  elif isinstance(raw_output, dict):
327
- # If it's already a dict, use it directly
328
  raw_result_dict = raw_output
329
  else:
 
330
  raise HTTPException(status_code=502, detail=f"Unexpected data type from OCR service: {type(raw_output)}")
331
- # --- END: FIX ---
332
-
333
- ocr_text = raw_result_dict.get("OCR", "")
334
- # Fallback in case the OCR key is missing but there's other data
335
- if not ocr_text:
336
- ocr_text = str(raw_result_dict)
337
 
 
 
 
338
  return OcrResponse(ocr_text=ocr_text, raw_response=raw_result_dict)
339
 
340
  except Exception as e:
341
- # Catch the specific HTTPException and re-raise it, otherwise wrap other exceptions
342
  if isinstance(e, HTTPException):
343
  raise e
344
  raise HTTPException(status_code=500, detail=f"An error occurred during OCR processing: {str(e)}")
@@ -348,39 +258,20 @@ async def perform_ocr(request: OcrRequest):
348
 
349
  @app.post("/v1/moderations", tags=["Moderation"])
350
  async def create_moderation(request: ModerationRequest):
351
- """Handles moderation requests, conforming to the OpenAI API specification."""
352
- input_texts = [request.input] if isinstance(request.input, str) else request.input
353
- if not input_texts:
354
- return JSONResponse(status_code=400, content={"error": {"message": "Request must have at least one input string."}})
355
- headers = {'Content-Type': 'application/json', 'User-Agent': 'Mozilla/5.0', 'Referer': 'https://www.chatwithmono.xyz/'}
356
- results = []
357
  try:
358
- async with httpx.AsyncClient(timeout=30) as client:
359
  for text_input in input_texts:
360
- resp = await client.post(MODERATION_API_URL, headers=headers, json={"text": text_input})
361
- resp.raise_for_status()
362
- upstream_data = resp.json()
363
- upstream_categories = upstream_data.get("categories", {})
364
- openai_categories = {
365
- "hate": upstream_categories.get("hate", False), "hate/threatening": False, "harassment": False, "harassment/threatening": False,
366
- "self-harm": upstream_categories.get("self-harm", False), "self-harm/intent": False, "self-harm/instructions": False,
367
- "sexual": upstream_categories.get("sexual", False), "sexual/minors": False,
368
- "violence": upstream_categories.get("violence", False), "violence/graphic": False,
369
- }
370
- result_item = {
371
- "flagged": upstream_data.get("overall_sentiment") == "flagged",
372
- "categories": openai_categories,
373
- "category_scores": {k: 1.0 if v else 0.0 for k, v in openai_categories.items()},
374
- }
375
- if reason := upstream_data.get("reason"):
376
- result_item["reason"] = reason
377
  results.append(result_item)
378
- except httpx.HTTPStatusError as e:
379
- return JSONResponse(status_code=502, content={"error": {"message": f"Moderation failed. Upstream error: {e.response.status_code}", "details": e.response.text}})
380
- except Exception as e:
381
- return JSONResponse(status_code=500, content={"error": {"message": "An internal error occurred during moderation.", "details": str(e)}})
382
-
383
- return JSONResponse(content={"id": generate_random_id("modr-"), "model": request.model, "results": results})
384
 
385
 
386
  # --- Main Execution ---
 
5
  import string
6
  import time
7
  import tempfile
8
+ import ast # <-- NEW IMPORT for safe literal evaluation
9
  from typing import List, Optional, Union, Any
10
 
11
  import httpx
 
19
 
20
  # --- Configuration ---
21
  load_dotenv()
 
 
22
  IMAGE_API_URL = os.environ.get("IMAGE_API_URL", "https://image.api.example.com")
23
  SNAPZION_UPLOAD_URL = "https://upload.snapzion.com/api/public-upload"
24
  SNAPZION_API_KEY = os.environ.get("SNAP", "")
 
41
  app = FastAPI(
42
  title="OpenAI Compatible API",
43
  description="An adapter for various services to be compatible with the OpenAI API specification.",
44
+ version="1.1.2" # Incremented version for the new fix
45
  )
 
46
  try:
47
  ocr_client = Client("multimodalart/Florence-2-l4")
48
  except Exception as e:
 
54
class Message(BaseModel):
    """A single chat message in an OpenAI-style conversation."""
    role: str     # e.g. "system", "user", "assistant" (handlers check for "system")
    content: str  # the message text
 
57
class ChatRequest(BaseModel):
    """Request body for the OpenAI-compatible /v1/chat/completions endpoint."""
    messages: List[Message]
    model: str
    stream: Optional[bool] = False
    # Tool specifications; when present they are injected into the system
    # prompt and tool calls are parsed back out of the model's text output.
    tools: Optional[Any] = None
 
62
class ImageGenerationRequest(BaseModel):
    """Request body for the OpenAI-compatible /v1/images/generations endpoint."""
    prompt: str
    aspect_ratio: Optional[str] = "1:1"  # forwarded only to the generic image backend
    n: Optional[int] = 1                 # number of images to generate
    user: Optional[str] = None           # accepted for API compatibility; not referenced by the visible handler
    model: Optional[str] = "default"     # selects the gpt-image/dall-e backend vs. the generic one
 
68
class ModerationRequest(BaseModel):
    """Request body for the OpenAI-compatible /v1/moderations endpoint."""
    input: Union[str, List[str]]  # a single string or a batch of strings
    model: Optional[str] = "text-moderation-stable"
 
71
  class OcrRequest(BaseModel):
72
  image_url: Optional[str] = Field(None, description="URL of the image to process.")
73
  image_b64: Optional[str] = Field(None, description="Base64 encoded string of the image to process.")
 
74
  @model_validator(mode='before')
75
  @classmethod
76
  def check_sources(cls, data: Any) -> Any:
 
80
  if data.get('image_url') and data.get('image_b64'):
81
  raise ValueError('Provide either image_url or image_b64, not both.')
82
  return data
 
83
class OcrResponse(BaseModel):
    """Response body of the /v1/ocr endpoint."""
    ocr_text: str       # extracted text (with fallbacks when the "OCR" key is absent)
    raw_response: dict  # parsed raw payload returned by the OCR service
 
94
 
95
  @app.get("/v1/models", tags=["Models"])
96
  async def list_models():
 
97
  return {"object": "list", "data": AVAILABLE_MODELS}
98
 
99
+ # (Chat, Image Generation, and Moderation endpoints are unchanged and remain correct)
100
@app.post("/v1/chat/completions", tags=["Chat"])
async def chat_completion(request: ChatRequest):
    """Handle OpenAI-style chat completion requests (streaming and non-streaming).

    Proxies the conversation to the upstream CHAT_API_URL and translates its
    "0:" (content) / "e:" / "d:" (usage) line protocol into OpenAI-compatible
    responses.  Tool support is emulated: tool specs are injected into the
    system prompt and tool calls are parsed out of <tool_call>...</tool_call>
    tags in the model's text.

    Fix vs. previous revision: the non-stream branch used bare ``except:``
    clauses, which also swallow BaseException subclasses such as
    ``asyncio.CancelledError`` and ``KeyboardInterrupt`` — deadly inside an
    async handler.  They are narrowed to the specific parse errors, matching
    the stream branch.
    """
    model_id = MODEL_ALIASES.get(request.model, request.model)
    chat_id = generate_random_id("chatcmpl-")
    headers = {
        'accept': 'text/event-stream',
        'content-type': 'application/json',
        'origin': 'https://www.chatwithmono.xyz',
        'referer': 'https://www.chatwithmono.xyz/',
        'user-agent': 'Mozilla/5.0',
    }

    if request.tools:
        # No native tool support upstream: describe the tools in the system
        # prompt and ask the model to answer with <tool_call> XML tags.
        tool_prompt = f"""You have access to the following tools. To call a tool, please respond with JSON for a tool call within <tool_call></tool_call> XML tags. Respond in the format {{"name": tool name, "parameters": dictionary of argument name and its value}}. Do not use variables.
Tools: {";".join(f"<tool>{tool}</tool>" for tool in request.tools)}
Response Format for tool call:
<tool_call>
{{"name": <function-name>, "arguments": <args-json-object>}}
</tool_call>"""
        if request.messages[0].role == "system":
            request.messages[0].content += "\n\n" + tool_prompt
        else:
            request.messages.insert(0, Message(role="system", content=tool_prompt))

    payload = {"messages": [msg.model_dump() for msg in request.messages], "model": model_id}

    if request.stream:
        async def event_stream():
            created = int(time.time())
            usage_info = None
            is_first_chunk = True
            tool_call_buffer = ""
            in_tool_call = False
            try:
                async with httpx.AsyncClient(timeout=120) as client:
                    async with client.stream("POST", CHAT_API_URL, headers=headers, json=payload) as response:
                        response.raise_for_status()
                        async for line in response.aiter_lines():
                            if not line:
                                continue
                            if line.startswith("0:"):
                                try:
                                    content_piece = json.loads(line[2:])
                                except json.JSONDecodeError:
                                    continue
                                current_buffer = content_piece
                                if in_tool_call:
                                    current_buffer = tool_call_buffer + content_piece
                                if "</tool_call>" in current_buffer:
                                    # A complete tool call has been buffered:
                                    # emit it as an OpenAI tool_calls delta.
                                    tool_str = current_buffer.split("<tool_call>")[1].split("</tool_call>")[0]
                                    tool_json = json.loads(tool_str.strip())
                                    delta = {
                                        "content": None,
                                        "tool_calls": [{"index": 0, "id": generate_random_id("call_"), "type": "function",
                                                        "function": {"name": tool_json["name"], "arguments": json.dumps(tool_json["parameters"])}}],
                                    }
                                    chunk = {"id": chat_id, "object": "chat.completion.chunk", "created": created, "model": model_id,
                                             "choices": [{"index": 0, "delta": delta, "finish_reason": None}], "usage": None}
                                    yield f"data: {json.dumps(chunk)}\n\n"
                                    in_tool_call = False
                                    tool_call_buffer = ""
                                    remaining_text = current_buffer.split("</tool_call>", 1)[1]
                                    if remaining_text:
                                        content_piece = remaining_text
                                    else:
                                        continue
                                if "<tool_call>" in content_piece:
                                    # Start buffering a tool call; stream any
                                    # text preceding the tag as normal content.
                                    in_tool_call = True
                                    tool_call_buffer += content_piece.split("<tool_call>", 1)[1]
                                    text_before = content_piece.split("<tool_call>", 1)[0]
                                    if text_before:
                                        delta = {"content": text_before, "tool_calls": None}
                                        chunk = {"id": chat_id, "object": "chat.completion.chunk", "created": created, "model": model_id,
                                                 "choices": [{"index": 0, "delta": delta, "finish_reason": None}], "usage": None}
                                        yield f"data: {json.dumps(chunk)}\n\n"
                                    if "</tool_call>" not in tool_call_buffer:
                                        continue
                                if not in_tool_call:
                                    delta = {"content": content_piece}
                                    if is_first_chunk:
                                        delta["role"] = "assistant"
                                        is_first_chunk = False
                                    chunk = {"id": chat_id, "object": "chat.completion.chunk", "created": created, "model": model_id,
                                             "choices": [{"index": 0, "delta": delta, "finish_reason": None}], "usage": None}
                                    yield f"data: {json.dumps(chunk)}\n\n"
                            elif line.startswith(("e:", "d:")):
                                try:
                                    usage_info = json.loads(line[2:]).get("usage")
                                except (json.JSONDecodeError, AttributeError):
                                    pass
                                break
                final_usage = None
                if usage_info:
                    final_usage = {"prompt_tokens": usage_info.get("promptTokens", 0),
                                   "completion_tokens": usage_info.get("completionTokens", 0),
                                   "total_tokens": usage_info.get("promptTokens", 0) + usage_info.get("completionTokens", 0)}
                done_chunk = {"id": chat_id, "object": "chat.completion.chunk", "created": created, "model": model_id,
                              "choices": [{"index": 0, "delta": {}, "finish_reason": "stop" if not in_tool_call else "tool_calls"}],
                              "usage": final_usage}
                yield f"data: {json.dumps(done_chunk)}\n\n"
            except httpx.HTTPStatusError as e:
                error_content = {"error": {"message": f"Upstream API error: {e.response.status_code}. Details: {e.response.text}", "type": "upstream_error", "code": str(e.response.status_code)}}
                yield f"data: {json.dumps(error_content)}\n\n"
            finally:
                yield "data: [DONE]\n\n"

        return StreamingResponse(event_stream(), media_type="text/event-stream")
    else:
        full_response, usage_info = "", {}
        try:
            async with httpx.AsyncClient(timeout=120) as client:
                async with client.stream("POST", CHAT_API_URL, headers=headers, json=payload) as response:
                    response.raise_for_status()
                    async for chunk in response.aiter_lines():
                        if chunk.startswith("0:"):
                            # Narrowed from a bare `except:`: TypeError covers a
                            # non-string JSON payload in the concatenation.
                            try:
                                full_response += json.loads(chunk[2:])
                            except (json.JSONDecodeError, TypeError):
                                continue
                        elif chunk.startswith(("e:", "d:")):
                            try:
                                usage_info = json.loads(chunk[2:]).get("usage", {})
                            except (json.JSONDecodeError, AttributeError):
                                continue
            tool_calls = None
            content_response = full_response
            if "<tool_call>" in full_response and "</tool_call>" in full_response:
                tool_call_str = full_response.split("<tool_call>")[1].split("</tool_call>")[0]
                tool_call = json.loads(tool_call_str.strip())
                tool_calls = [{"id": generate_random_id("call_"), "type": "function",
                               "function": {"name": tool_call["name"], "arguments": json.dumps(tool_call["parameters"])}}]
                content_response = None
            return JSONResponse(content={
                "id": chat_id, "object": "chat.completion", "created": int(time.time()), "model": model_id,
                "choices": [{"index": 0, "message": {"role": "assistant", "content": content_response, "tool_calls": tool_calls}, "finish_reason": "stop" if not tool_calls else "tool_calls"}],
                "usage": {"prompt_tokens": usage_info.get("promptTokens", 0), "completion_tokens": usage_info.get("completionTokens", 0), "total_tokens": usage_info.get("promptTokens", 0) + usage_info.get("completionTokens", 0)},
            })
        except httpx.HTTPStatusError as e:
            return JSONResponse(status_code=e.response.status_code, content={"error": {"message": f"Upstream API error. Details: {e.response.text}", "type": "upstream_error"}})
 
 
 
 
 
 
 
 
 
 
 
170
 
171
@app.post("/v1/images/generations", tags=["Images"])
async def generate_images(request: ImageGenerationRequest):
    """Generate images in the OpenAI `/v1/images/generations` shape.

    The gpt-image / dall-e / nextlm models are routed to IMAGE_GEN_API_URL
    (which returns base64 data, optionally re-hosted via Snapzion); every
    other model name falls through to the generic IMAGE_API_URL backend.
    """
    results = []
    try:
        async with httpx.AsyncClient(timeout=120) as client:
            for _ in range(request.n):
                model = request.model or "default"
                if model in ["gpt-image-1", "dall-e-3", "dall-e-2", "nextlm-image-1"]:
                    headers = {'Content-Type': 'application/json', 'User-Agent': 'Mozilla/5.0', 'Referer': 'https://www.chatwithmono.xyz/'}
                    payload = {"prompt": request.prompt, "model": model}
                    resp = await client.post(IMAGE_GEN_API_URL, headers=headers, json=payload)
                    resp.raise_for_status()
                    data = resp.json()
                    b64_image = data.get("image")
                    if not b64_image:
                        return JSONResponse(status_code=502, content={"error": "Missing base64 image in response"})
                    image_url = f"data:image/png;base64,{b64_image}"
                    if SNAPZION_API_KEY:
                        # Best-effort re-hosting: on any non-200 the data URI is kept.
                        upload_headers = {"Authorization": SNAPZION_API_KEY}
                        upload_files = {'file': ('image.png', base64.b64decode(b64_image), 'image/png')}
                        upload_resp = await client.post(SNAPZION_UPLOAD_URL, headers=upload_headers, files=upload_files)
                        if upload_resp.status_code == 200:
                            image_url = upload_resp.json().get("url", image_url)
                    results.append({"url": image_url, "b64_json": b64_image, "revised_prompt": data.get("revised_prompt")})
                else:
                    params = {"prompt": request.prompt, "aspect_ratio": request.aspect_ratio, "link": "typegpt.net"}
                    resp = await client.get(IMAGE_API_URL, params=params)
                    resp.raise_for_status()
                    data = resp.json()
                    results.append({"url": data.get("image_link"), "b64_json": data.get("base64_output")})
    except httpx.HTTPStatusError as e:
        return JSONResponse(status_code=502, content={"error": f"Image generation failed. Upstream error: {e.response.status_code}", "details": e.response.text})
    except Exception as e:
        return JSONResponse(status_code=500, content={"error": "An internal error occurred.", "details": str(e)})
    return {"created": int(time.time()), "data": results}
 
 
 
 
 
 
 
 
 
 
 
190
 
191
 
192
+ # === REVISED AND FIXED OCR Endpoint ===
193
  @app.post("/v1/ocr", response_model=OcrResponse, tags=["OCR"])
194
  async def perform_ocr(request: OcrRequest):
195
  """
 
218
  raw_output = prediction[0]
219
  raw_result_dict = {}
220
 
221
+ # --- START: ROBUST PARSING LOGIC ---
 
222
  if isinstance(raw_output, str):
223
  try:
224
+ # First, try to parse as standard JSON
225
  raw_result_dict = json.loads(raw_output)
226
  except json.JSONDecodeError:
227
+ try:
228
+ # If JSON fails, try to evaluate as a Python literal (handles single quotes)
229
+ parsed_output = ast.literal_eval(raw_output)
230
+ if isinstance(parsed_output, dict):
231
+ raw_result_dict = parsed_output
232
+ else:
233
+ # The literal is something else (e.g., a list), wrap it.
234
+ raw_result_dict = {"result": str(parsed_output)}
235
+ except (ValueError, SyntaxError):
236
+ # If all parsing fails, assume the string is the direct OCR text.
237
+ raw_result_dict = {"ocr_text": raw_output}
238
  elif isinstance(raw_output, dict):
239
+ # It's already a dictionary, use it directly
240
  raw_result_dict = raw_output
241
  else:
242
+ # Handle other unexpected data types
243
  raise HTTPException(status_code=502, detail=f"Unexpected data type from OCR service: {type(raw_output)}")
244
+ # --- END: ROBUST PARSING LOGIC ---
 
 
 
 
 
245
 
246
+ # Extract text from the dictionary, with fallbacks
247
+ ocr_text = raw_result_dict.get("OCR", raw_result_dict.get("ocr_text", str(raw_result_dict)))
248
+
249
  return OcrResponse(ocr_text=ocr_text, raw_response=raw_result_dict)
250
 
251
  except Exception as e:
 
252
  if isinstance(e, HTTPException):
253
  raise e
254
  raise HTTPException(status_code=500, detail=f"An error occurred during OCR processing: {str(e)}")
 
258
 
259
@app.post("/v1/moderations", tags=["Moderation"])
async def create_moderation(request: ModerationRequest):
    """Moderate one or more input strings, OpenAI `/v1/moderations` style.

    Each input is sent to MODERATION_API_URL; the upstream category flags are
    mapped onto the full OpenAI category set, with unreported categories
    defaulting to False and scores derived as 1.0/0.0 from the flags.
    """
    input_texts = [request.input] if isinstance(request.input, str) else request.input
    if not input_texts:
        return JSONResponse(status_code=400, content={"error": {"message": "Request must have at least one input string."}})
    headers = {'Content-Type': 'application/json', 'User-Agent': 'Mozilla/5.0', 'Referer': 'https://www.chatwithmono.xyz/'}
    results = []
    try:
        async with httpx.AsyncClient(timeout=30) as client:
            for text_input in input_texts:
                resp = await client.post(MODERATION_API_URL, headers=headers, json={"text": text_input})
                resp.raise_for_status()
                upstream_data = resp.json()
                upstream_categories = upstream_data.get("categories", {})
                openai_categories = {
                    "hate": upstream_categories.get("hate", False), "hate/threatening": False, "harassment": False, "harassment/threatening": False,
                    "self-harm": upstream_categories.get("self-harm", False), "self-harm/intent": False, "self-harm/instructions": False,
                    "sexual": upstream_categories.get("sexual", False), "sexual/minors": False,
                    "violence": upstream_categories.get("violence", False), "violence/graphic": False,
                }
                result_item = {
                    "flagged": upstream_data.get("overall_sentiment") == "flagged",
                    "categories": openai_categories,
                    "category_scores": {k: 1.0 if v else 0.0 for k, v in openai_categories.items()},
                }
                if reason := upstream_data.get("reason"):
                    result_item["reason"] = reason
                results.append(result_item)
    except httpx.HTTPStatusError as e:
        return JSONResponse(status_code=502, content={"error": {"message": f"Moderation failed. Upstream error: {e.response.status_code}", "details": e.response.text}})
    except Exception as e:
        return JSONResponse(status_code=500, content={"error": {"message": "An internal error occurred during moderation.", "details": str(e)}})
    return JSONResponse(content={"id": generate_random_id("modr-"), "model": request.model, "results": results})
 
 
 
275
 
276
 
277
  # --- Main Execution ---