Niansuh committed on
Commit
ba2ac0e
·
verified ·
1 Parent(s): 8c3ee02

Delete api

Browse files
Files changed (10) hide show
  1. api/__init__.py +0 -0
  2. api/__pycache__/dummy.txt +0 -1
  3. api/app.py +0 -40
  4. api/auth.py +0 -10
  5. api/config.py +0 -153
  6. api/logger.py +0 -20
  7. api/models.py +0 -14
  8. api/provider/gizai.py +0 -153
  9. api/routes.py +0 -59
  10. api/utils.py +0 -198
api/__init__.py DELETED
File without changes
api/__pycache__/dummy.txt DELETED
@@ -1 +0,0 @@
1
-
 
 
api/app.py DELETED
@@ -1,40 +0,0 @@
1
- from fastapi import FastAPI, Request
2
- from starlette.middleware.cors import CORSMiddleware
3
- from fastapi.responses import JSONResponse
4
- from api.logger import setup_logger
5
- from api.routes import router
6
-
7
- logger = setup_logger(__name__)
8
-
9
- def create_app():
10
- app = FastAPI(
11
- title="NiansuhAI API Gateway",
12
- docs_url=None, # Disable Swagger UI
13
- redoc_url=None, # Disable ReDoc
14
- openapi_url=None, # Disable OpenAPI schema
15
- )
16
-
17
- # CORS settings
18
- app.add_middleware(
19
- CORSMiddleware,
20
- allow_origins=["*"], # Adjust as needed for security
21
- allow_credentials=True,
22
- allow_methods=["*"],
23
- allow_headers=["*"],
24
- )
25
-
26
- # Include routes
27
- app.include_router(router)
28
-
29
- # Global exception handler for better error reporting
30
- @app.exception_handler(Exception)
31
- async def global_exception_handler(request: Request, exc: Exception):
32
- logger.error(f"An error occurred: {str(exc)}")
33
- return JSONResponse(
34
- status_code=500,
35
- content={"message": "An internal server error occurred."},
36
- )
37
-
38
- return app
39
-
40
- app = create_app()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
api/auth.py DELETED
@@ -1,10 +0,0 @@
1
from fastapi import Depends, HTTPException
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
from api.config import APP_SECRET

# Bearer-token security scheme shared by the protected routes.
security = HTTPBearer()

def verify_app_secret(credentials: HTTPAuthorizationCredentials = Depends(security)):
    """FastAPI dependency: validate the Authorization bearer token.

    Compares the presented token against APP_SECRET and returns it on
    success; raises a 403 HTTPException otherwise.
    """
    token = credentials.credentials
    if token != APP_SECRET:
        raise HTTPException(status_code=403, detail="Invalid APP_SECRET")
    return token
 
 
 
 
 
 
 
 
 
 
 
api/config.py DELETED
@@ -1,153 +0,0 @@
1
import os
from dotenv import load_dotenv

load_dotenv()

# Upstream service this gateway proxies to.
BASE_URL = "https://www.blackbox.ai"

# Browser-like headers sent on every upstream request; a dynamic Referer is
# added per-request in api.utils.
headers = {
    'accept': '*/*',
    'accept-language': 'en-US,en;q=0.9',
    'origin': 'https://www.blackbox.ai',
    'priority': 'u=1, i',
    'sec-ch-ua': '"Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"Windows"',
    'sec-fetch-dest': 'empty',
    'sec-fetch-mode': 'cors',
    'sec-fetch-site': 'same-origin',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                  'AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/130.0.0.0 Safari/537.36',
}

# Shared secret clients must present as a bearer token (see api.auth).
APP_SECRET = os.getenv("APP_SECRET")

# Models advertised by /v1/models and accepted by /v1/chat/completions.
ALLOWED_MODELS = [
    {"id": "blackboxai", "name": "blackboxai"},
    {"id": "blackboxai-pro", "name": "blackboxai-pro"},
    {"id": "flux", "name": "flux"},
    {"id": "llama-3.1-8b", "name": "llama-3.1-8b"},
    {"id": "llama-3.1-70b", "name": "llama-3.1-70b"},
    {"id": "llama-3.1-405b", "name": "llama-3.1-405b"},
    {"id": "gpt-4o", "name": "gpt-4o"},
    {"id": "gemini-pro", "name": "gemini-pro"},
    {"id": "gemini-1.5-flash", "name": "gemini-1.5-flash"},
    {"id": "claude-sonnet-3.5", "name": "claude-sonnet-3.5"},
    {"id": "PythonAgent", "name": "PythonAgent"},
    {"id": "JavaAgent", "name": "JavaAgent"},
    {"id": "JavaScriptAgent", "name": "JavaScriptAgent"},
    {"id": "HTMLAgent", "name": "HTMLAgent"},
    {"id": "GoogleCloudAgent", "name": "GoogleCloudAgent"},
    {"id": "AndroidDeveloper", "name": "AndroidDeveloper"},
    {"id": "SwiftDeveloper", "name": "SwiftDeveloper"},
    {"id": "Next.jsAgent", "name": "Next.jsAgent"},
    {"id": "MongoDBAgent", "name": "MongoDBAgent"},
    {"id": "PyTorchAgent", "name": "PyTorchAgent"},
    {"id": "ReactAgent", "name": "ReactAgent"},
    {"id": "XcodeAgent", "name": "XcodeAgent"},
    {"id": "AngularJSAgent", "name": "AngularJSAgent"},
    {"id": "RepoMap", "name": "RepoMap"},
    {"id": "gemini-1.5-pro-latest", "name": "gemini-pro"},
    {"id": "gemini-1.5-pro", "name": "gemini-1.5-pro"},
    {"id": "claude-3-5-sonnet-20240620", "name": "claude-sonnet-3.5"},
    {"id": "claude-3-5-sonnet", "name": "claude-sonnet-3.5"},
    {"id": "Niansuh", "name": "Niansuh"},
]

# Maps client-facing model ids (including aliases) to upstream model ids.
# Fix: the literal previously contained a duplicate "flux" key; the second
# occurrence (identical value) has been removed.
MODEL_MAPPING = {
    "blackboxai": "blackboxai",
    "blackboxai-pro": "blackboxai-pro",
    "flux": "flux",
    "ImageGeneration": "flux",
    "llama-3.1-8b": "llama-3.1-8b",
    "llama-3.1-70b": "llama-3.1-70b",
    "llama-3.1-405b": "llama-3.1-405b",
    "gpt-4o": "gpt-4o",
    "gemini-pro": "gemini-pro",
    "gemini-1.5-flash": "gemini-1.5-flash",
    "claude-sonnet-3.5": "claude-sonnet-3.5",
    "PythonAgent": "PythonAgent",
    "JavaAgent": "JavaAgent",
    "JavaScriptAgent": "JavaScriptAgent",
    "HTMLAgent": "HTMLAgent",
    "GoogleCloudAgent": "GoogleCloudAgent",
    "AndroidDeveloper": "AndroidDeveloper",
    "SwiftDeveloper": "SwiftDeveloper",
    "Next.jsAgent": "Next.jsAgent",
    "MongoDBAgent": "MongoDBAgent",
    "PyTorchAgent": "PyTorchAgent",
    "ReactAgent": "ReactAgent",
    "XcodeAgent": "XcodeAgent",
    "AngularJSAgent": "AngularJSAgent",
    "RepoMap": "RepoMap",
    # Additional mappings
    "gemini-flash": "gemini-1.5-flash",
    "claude-3.5-sonnet": "claude-sonnet-3.5",
    "gemini-1.5-pro-latest": "gemini-pro",
    "gemini-1.5-pro": "gemini-1.5-pro",
    "claude-3-5-sonnet-20240620": "claude-sonnet-3.5",
    "claude-3-5-sonnet": "claude-sonnet-3.5",
    "Niansuh": "Niansuh",
}

# Agent modes — upstream "agentMode" payloads keyed by model id.
AGENT_MODE = {
    'flux': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "flux"},
    'Niansuh': {'mode': True, 'id': "NiansuhAIk1HgESy", 'name': "Niansuh"},

}

# Upstream "trendingAgentMode" payloads keyed by model id.
TRENDING_AGENT_MODE = {
    "blackboxai": {},
    "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
    "llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
    'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
    'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"},
    'blackboxai-pro': {'mode': True, 'id': "BLACKBOXAI-PRO"},
    'PythonAgent': {'mode': True, 'id': "Python Agent"},
    'JavaAgent': {'mode': True, 'id': "Java Agent"},
    'JavaScriptAgent': {'mode': True, 'id': "JavaScript Agent"},
    'HTMLAgent': {'mode': True, 'id': "HTML Agent"},
    'GoogleCloudAgent': {'mode': True, 'id': "Google Cloud Agent"},
    'AndroidDeveloper': {'mode': True, 'id': "Android Developer"},
    'SwiftDeveloper': {'mode': True, 'id': "Swift Developer"},
    'Next.jsAgent': {'mode': True, 'id': "Next.js Agent"},
    'MongoDBAgent': {'mode': True, 'id': "MongoDB Agent"},
    'PyTorchAgent': {'mode': True, 'id': "PyTorch Agent"},
    'ReactAgent': {'mode': True, 'id': "React Agent"},
    'XcodeAgent': {'mode': True, 'id': "Xcode Agent"},
    'AngularJSAgent': {'mode': True, 'id': "AngularJS Agent"},
    'RepoMap': {'mode': True, 'id': "repomap"},
}

# Model prefixes — prepended to outgoing prompts and stripped from replies.
MODEL_PREFIXES = {
    'gpt-4o': '@GPT-4o',
    'gemini-pro': '@Gemini-PRO',
    'claude-sonnet-3.5': '@Claude-Sonnet-3.5',
    'PythonAgent': '@Python Agent',
    'JavaAgent': '@Java Agent',
    'JavaScriptAgent': '@JavaScript Agent',
    'HTMLAgent': '@HTML Agent',
    'GoogleCloudAgent': '@Google Cloud Agent',
    'AndroidDeveloper': '@Android Developer',
    'SwiftDeveloper': '@Swift Developer',
    'Next.jsAgent': '@Next.js Agent',
    'MongoDBAgent': '@MongoDB Agent',
    'PyTorchAgent': '@PyTorch Agent',
    'ReactAgent': '@React Agent',
    'XcodeAgent': '@Xcode Agent',
    'AngularJSAgent': '@AngularJS Agent',
    'blackboxai-pro': '@BLACKBOXAI-PRO',
    'flux': '@Image Generation',
    # Add any additional prefixes if necessary
}

# Model referers — per-model Referer paths appended to BASE_URL.
MODEL_REFERERS = {
    "blackboxai": "/?model=blackboxai",
    "gpt-4o": "/?model=gpt-4o",
    "gemini-pro": "/?model=gemini-pro",
    "claude-sonnet-3.5": "/?model=claude-sonnet-3.5"
    # Add any additional referers if necessary
}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
api/logger.py DELETED
@@ -1,20 +0,0 @@
1
import logging

def setup_logger(name):
    """Return a logger named *name* configured for console output.

    Idempotent: if the logger already has handlers, it is returned
    unchanged, so repeated calls never attach duplicate handlers.
    """
    logger = logging.getLogger(name)
    if logger.handlers:
        return logger

    logger.setLevel(logging.INFO)
    fmt = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    # Console handler
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(fmt)
    logger.addHandler(stream_handler)

    # An ERROR-level file handler ('error.log') can be attached here if
    # persistent error logs are needed; it is intentionally disabled.
    return logger
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
api/models.py DELETED
@@ -1,14 +0,0 @@
1
from typing import List, Optional
from pydantic import BaseModel

class Message(BaseModel):
    """One chat turn in an OpenAI-style conversation."""
    role: str
    # Either plain text, or a two-element list of content parts
    # (text part + image_url part) — see api.utils.message_to_dict.
    content: str | list

class ChatRequest(BaseModel):
    """OpenAI-compatible request body for /v1/chat/completions."""
    model: str
    messages: List[Message]
    stream: Optional[bool] = False
    temperature: Optional[float] = 0.7
    top_p: Optional[float] = 0.9
    # Effectively "unlimited" sentinel forwarded upstream as maxTokens.
    max_tokens: Optional[int] = 99999999
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
api/provider/gizai.py DELETED
@@ -1,153 +0,0 @@
1
# api/provider/gizai.py

from __future__ import annotations

import json
from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from ..image import ImageResponse
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt

class GizAI(AsyncGeneratorProvider, ProviderModelMixin):
    """Provider for app.giz.ai: chat completions and image generation
    through a single inference endpoint.
    """
    url = "https://app.giz.ai/assistant/"
    api_endpoint = "https://app.giz.ai/api/data/users/inferenceServer.infer"
    working = True

    supports_system_message = True
    supports_message_history = True

    # Chat models
    default_model = 'chat-gemini-flash'
    chat_models = [
        default_model,
        'chat-gemini-pro',
        'chat-gpt4m',
        'chat-gpt4',
        'claude-sonnet',
        'claude-haiku',
        'llama-3-70b',
        'llama-3-8b',
        'mistral-large',
        'chat-o1-mini'
    ]

    # Image models
    image_models = [
        'flux1',
        'sdxl',
        'sd',
        'sd35',
    ]

    models = [*chat_models, *image_models]

    # Friendly alias -> provider model id.
    model_aliases = {
        # Chat model aliases
        "gemini-flash": "chat-gemini-flash",
        "gemini-pro": "chat-gemini-pro",
        "gpt-4o-mini": "chat-gpt4m",
        "gpt-4o": "chat-gpt4",
        "claude-3.5-sonnet": "claude-sonnet",
        "claude-3-haiku": "claude-haiku",
        "llama-3.1-70b": "llama-3-70b",
        "llama-3.1-8b": "llama-3-8b",
        "o1-mini": "chat-o1-mini",
        # Image model aliases
        "sd-1.5": "sd",
        "sd-3.5": "sd35",
        "flux-schnell": "flux1",
    }

    @classmethod
    def get_model(cls, model: str) -> str:
        """Resolve *model* to a provider model id.

        Exact ids pass through, known aliases are mapped, and anything
        else falls back to ``default_model``.
        """
        if model in cls.models:
            return model
        elif model in cls.model_aliases:
            return cls.model_aliases[model]
        else:
            return cls.default_model

    @classmethod
    def is_image_model(cls, model: str) -> bool:
        """Return True if *model* is one of the image-generation models."""
        return model in cls.image_models

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield either ImageResponse objects (image models) or the chat
        completion text (chat models) from the GizAI inference endpoint.
        """
        model = cls.get_model(model)

        # Browser-like headers; the endpoint appears to expect them.
        headers = {
            'Accept': 'application/json, text/plain, */*',
            'Accept-Language': 'en-US,en;q=0.9',
            'Cache-Control': 'no-cache',
            'Connection': 'keep-alive',
            'Content-Type': 'application/json',
            'Origin': 'https://app.giz.ai',
            'Pragma': 'no-cache',
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'same-origin',
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
            'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Linux"'
        }

        async with ClientSession() as session:
            if cls.is_image_model(model):
                # Image generation: only the last message is used as prompt.
                prompt = messages[-1]["content"]
                data = {
                    "model": model,
                    "input": {
                        "width": "1024",
                        "height": "1024",
                        "steps": 4,
                        "output_format": "webp",
                        "batch_size": 1,
                        "mode": "plan",
                        "prompt": prompt
                    }
                }
                async with session.post(
                    cls.api_endpoint,
                    headers=headers,
                    data=json.dumps(data),
                    proxy=proxy
                ) as response:
                    response.raise_for_status()
                    response_data = await response.json()
                    # Yields nothing if the job is not completed or has no
                    # output — callers see an empty generator in that case.
                    if response_data.get('status') == 'completed' and response_data.get('output'):
                        for url in response_data['output']:
                            yield ImageResponse(images=url, alt="Generated Image")
            else:
                # Chat completion: whole history flattened into one prompt.
                data = {
                    "model": model,
                    "input": {
                        "messages": [
                            {
                                "type": "human",
                                "content": format_prompt(messages)
                            }
                        ],
                        "mode": "plan"
                    },
                    "noStream": True
                }
                async with session.post(
                    cls.api_endpoint,
                    headers=headers,
                    data=json.dumps(data),
                    proxy=proxy
                ) as response:
                    response.raise_for_status()
                    result = await response.json()
                    yield result.get('output', '')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
api/routes.py DELETED
@@ -1,59 +0,0 @@
1
import json
from fastapi import APIRouter, Depends, HTTPException, Request, Response
from fastapi.responses import StreamingResponse
from api.auth import verify_app_secret
from api.config import ALLOWED_MODELS
from api.models import ChatRequest
from api.utils import process_non_streaming_response, process_streaming_response
from api.logger import setup_logger

logger = setup_logger(__name__)

router = APIRouter()

@router.options("/v1/chat/completions")
@router.options("/api/v1/chat/completions")
async def chat_completions_options():
    """Answer CORS preflight requests for the chat-completion endpoints."""
    return Response(
        status_code=200,
        headers={
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Methods": "POST, OPTIONS",
            "Access-Control-Allow-Headers": "Content-Type, Authorization",
        },
    )

@router.get("/v1/models")
@router.get("/api/v1/models")
async def list_models():
    """Return the OpenAI-style model list built from ALLOWED_MODELS."""
    return {"object": "list", "data": ALLOWED_MODELS}

@router.post("/v1/chat/completions")
@router.post("/api/v1/chat/completions")
async def chat_completions(
    request: ChatRequest, app_secret: str = Depends(verify_app_secret)
):
    """Handle an OpenAI-compatible chat completion.

    Validates the model against ALLOWED_MODELS (400 otherwise) and
    dispatches to the streaming or non-streaming processor.
    """
    logger.info("Entering chat_completions route")
    logger.info(f"Processing chat completion request for model: {request.model}")

    if request.model not in [model["id"] for model in ALLOWED_MODELS]:
        raise HTTPException(
            status_code=400,
            detail=f"Model {request.model} is not allowed. Allowed models are: {', '.join(model['id'] for model in ALLOWED_MODELS)}",
        )

    if request.stream:
        logger.info("Streaming response")
        return StreamingResponse(process_streaming_response(request), media_type="text/event-stream")
    else:
        logger.info("Non-streaming response")
        return await process_non_streaming_response(request)

# Health/liveness probes. Fix: the deprecated Starlette ``router.route(...)``
# decorator was replaced with ``router.get(...)`` so all probe paths are
# registered as ordinary FastAPI GET endpoints.
@router.get("/")
@router.get("/healthz")
@router.get("/ready")
@router.get("/alive")
@router.get("/status")
@router.get("/health")
def health_check(request: Request):
    """Always report OK; used by load balancers and orchestration probes."""
    return Response(content=json.dumps({"status": "ok"}), media_type="application/json")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
api/utils.py DELETED
@@ -1,198 +0,0 @@
1
- from datetime import datetime
2
- import json
3
- from typing import Any, Dict, Optional
4
-
5
- import httpx
6
- from api.config import (
7
- MODEL_MAPPING,
8
- headers,
9
- AGENT_MODE,
10
- TRENDING_AGENT_MODE,
11
- BASE_URL,
12
- MODEL_PREFIXES,
13
- MODEL_REFERERS
14
- )
15
- from fastapi import HTTPException
16
- from api.models import ChatRequest
17
-
18
- from api.logger import setup_logger
19
-
20
- import uuid # Added import for uuid
21
-
22
- logger = setup_logger(__name__)
23
-
24
def create_chat_completion_data(
    content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
) -> Dict[str, Any]:
    """Build one OpenAI-style ``chat.completion.chunk`` payload.

    A fresh random id is generated per chunk; ``usage`` is always None
    because upstream does not report token counts.
    """
    choice = {
        "index": 0,
        "delta": {"content": content, "role": "assistant"},
        "finish_reason": finish_reason,
    }
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": timestamp,
        "model": model,
        "choices": [choice],
        "usage": None,
    }
41
-
42
def message_to_dict(message, model_prefix: Optional[str] = None):
    """Convert a Message into the upstream wire format.

    Plain-string content is passed through (optionally prefixed with
    *model_prefix*). A two-element content list is treated as a
    [text-part, image-part] pair and expanded into content + image data.
    Any other shape is forwarded unchanged.
    """
    content = message.content
    if isinstance(content, str):
        text = f"{model_prefix} {content}" if model_prefix else content
        return {"role": message.role, "content": text}
    if isinstance(content, list) and len(content) == 2:
        text = content[0]["text"]
        if model_prefix:
            text = f"{model_prefix} {text}"
        return {
            "role": message.role,
            "content": text,
            "data": {
                "imageBase64": content[1]["image_url"]["url"],
                "fileText": "",
                "title": "snapshot",
            },
        }
    return {"role": message.role, "content": content}
63
-
64
def strip_model_prefix(content: str, model_prefix: str) -> str:
    """Remove the model prefix from the response content if present.

    Fix: with an empty *model_prefix*, ``content.startswith("")`` is always
    True, so the content was unintentionally whitespace-stripped. Only strip
    when a non-empty prefix actually matches.
    """
    if model_prefix and content.startswith(model_prefix):
        return content[len(model_prefix):].strip()
    return content
69
-
70
async def process_streaming_response(request: ChatRequest):
    """Stream a chat completion from the upstream API as SSE events.

    Yields OpenAI-style ``data: {...}`` chunk lines, then a final chunk
    with finish_reason "stop" and a ``data: [DONE]`` terminator. Raises
    HTTPException on upstream HTTP or transport errors.
    """
    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")
    referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
    referer_url = f"{BASE_URL}{referer_path}"

    # Update headers with dynamic Referer
    dynamic_headers = headers.copy()
    dynamic_headers['Referer'] = referer_url

    json_data = {
        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
        "previewToken": None,
        "userId": None,
        "codeModelMode": True,
        "agentMode": agent_mode,
        "trendingAgentMode": trending_agent_mode,
        "isMicMode": False,
        "userSystemPrompt": None,
        "maxTokens": request.max_tokens,
        "playgroundTopP": request.top_p,
        "playgroundTemperature": request.temperature,
        "isChromeExt": False,
        "githubToken": None,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "visitFromDelta": False,
        "mobileClient": False,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
    }

    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                "POST",
                f"{BASE_URL}/api/chat",
                headers=dynamic_headers,
                json=json_data,
                timeout=100,
            ) as response:
                response.raise_for_status()
                # Fix: ``timestamp`` was previously assigned only inside the
                # loop, so an empty upstream stream raised NameError at the
                # final "stop" chunk below. Bind a default before the loop.
                timestamp = int(datetime.now().timestamp())
                async for line in response.aiter_lines():
                    timestamp = int(datetime.now().timestamp())
                    if line:
                        content = line
                        # Upstream sometimes prepends this marker; drop it.
                        if content.startswith("$@$v=undefined-rv1$@$"):
                            content = content[21:]
                        # Strip the model prefix from the response content
                        cleaned_content = strip_model_prefix(content, model_prefix)
                        yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"

                yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
                yield "data: [DONE]\n\n"
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request: {e}")
            raise HTTPException(status_code=500, detail=str(e))
131
-
132
async def process_non_streaming_response(request: ChatRequest):
    """Collect the full upstream reply and return one OpenAI-style
    ``chat.completion`` response dict.

    Raises HTTPException on upstream HTTP or transport errors.
    """
    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")
    referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
    referer_url = f"{BASE_URL}{referer_path}"

    # Update headers with dynamic Referer
    dynamic_headers = headers.copy()
    dynamic_headers['Referer'] = referer_url

    json_data = {
        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
        "previewToken": None,
        "userId": None,
        "codeModelMode": True,
        "agentMode": agent_mode,
        "trendingAgentMode": trending_agent_mode,
        "isMicMode": False,
        "userSystemPrompt": None,
        "maxTokens": request.max_tokens,
        "playgroundTopP": request.top_p,
        "playgroundTemperature": request.temperature,
        "isChromeExt": False,
        "githubToken": None,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "visitFromDelta": False,
        "mobileClient": False,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
    }
    full_response = ""
    async with httpx.AsyncClient() as client:
        try:
            # Fix: no timeout was set here, so httpx's 5-second default
            # applied; use the same generous 100s budget as the streaming
            # path so long completions are not cut off.
            async with client.stream(
                method="POST",
                url=f"{BASE_URL}/api/chat",
                headers=dynamic_headers,
                json=json_data,
                timeout=100,
            ) as response:
                response.raise_for_status()
                async for chunk in response.aiter_text():
                    full_response += chunk
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request: {e}")
            raise HTTPException(status_code=500, detail=str(e))
    # Upstream sometimes prepends this marker; drop it.
    if full_response.startswith("$@$v=undefined-rv1$@$"):
        full_response = full_response[21:]

    # Strip the model prefix from the full response
    cleaned_full_response = strip_model_prefix(full_response, model_prefix)

    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion",
        "created": int(datetime.now().timestamp()),
        "model": request.model,
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": cleaned_full_response},
                "finish_reason": "stop",
            }
        ],
        "usage": None,
    }