rkihacker committed on
Commit
7dbf7e8
·
verified ·
1 Parent(s): 52b25ad

Upload 14 files

Browse files
Files changed (14) hide show
  1. Dockerfile +28 -0
  2. api/__init__.py +0 -0
  3. api/app.py +41 -0
  4. api/auth.py +10 -0
  5. api/config.py +563 -0
  6. api/logger.py +54 -0
  7. api/models.py +14 -0
  8. api/routes.py +66 -0
  9. api/utils.py +533 -0
  10. api/validate.py +62 -0
  11. dockerignore +10 -0
  12. gitattributes +59 -0
  13. main.py +5 -0
  14. requirements.txt +8 -0
Dockerfile ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Use the official Python 3.10 slim image
FROM python:3.10-slim

# Don't write .pyc files; flush stdout/stderr immediately (container-friendly logging)
ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1

# Set the working directory to /app
WORKDIR /app

# procps provides process tools (ps/top). NOTE(review): `nproc` itself comes from
# coreutils, which python:3.10-slim already ships, so this package may be droppable.
# Use --no-install-recommends and clean the apt lists to keep the image small.
RUN apt-get update \
    && apt-get install -y --no-install-recommends procps \
    && rm -rf /var/lib/apt/lists/*

# Copy only requirements.txt first so the dependency layer is cached by Docker
COPY requirements.txt .

# Upgrade pip and install dependencies in a single layer
RUN pip install --no-cache-dir --upgrade pip \
    && pip install --no-cache-dir -r requirements.txt

# Copy the rest of the application code
COPY . .

# Expose port 8001 to the outside world
EXPOSE 8001

# Run Uvicorn with one worker per CPU core
CMD ["sh", "-c", "uvicorn main:app --host 0.0.0.0 --port 8001 --workers $(nproc)"]
api/__init__.py ADDED
File without changes
api/app.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from fastapi import FastAPI, Request
from starlette.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from api.logger import setup_logger
from api.routes import router


logger = setup_logger(__name__)

def create_app():
    """Build and configure the FastAPI application.

    All interactive docs endpoints (Swagger, ReDoc, the OpenAPI schema) are
    disabled, CORS is enabled, the API routes are mounted, and a catch-all
    exception handler turns unexpected errors into a clean JSON 500 response.

    Returns:
        FastAPI: the fully configured application instance.
    """
    app = FastAPI(
        title="NiansuhAI API Gateway",
        docs_url=None,     # Disable Swagger UI
        redoc_url=None,    # Disable ReDoc
        openapi_url=None,  # Disable the OpenAPI schema endpoint
    )

    # CORS settings.
    # NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
    # rejected by browsers per the CORS spec — tighten origins for production.
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],  # Adjust as needed for security
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )

    # Include routes
    app.include_router(router)

    # Global exception handler for better error reporting
    @app.exception_handler(Exception)
    async def global_exception_handler(request: Request, exc: Exception):
        # Pass exc_info so the full traceback is logged, not just str(exc)
        # (the original logger.error dropped the stack trace entirely).
        logger.error(f"An error occurred: {str(exc)}", exc_info=exc)
        return JSONResponse(
            status_code=500,
            content={"message": "An internal server error occurred."},
        )

    return app

app = create_app()
api/auth.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
import secrets

from fastapi import Depends, HTTPException
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
from api.config import APP_SECRET

security = HTTPBearer()

def verify_app_secret(credentials: HTTPAuthorizationCredentials = Depends(security)):
    """FastAPI dependency: validate the Bearer token against APP_SECRET.

    Uses secrets.compare_digest for a constant-time comparison (the original
    `!=` check leaks timing information). Also rejects outright when
    APP_SECRET is unset/empty, instead of comparing against None.

    Returns:
        str: the validated token.

    Raises:
        HTTPException: 403 if the token does not match (or no secret is set).
    """
    supplied = credentials.credentials
    if not APP_SECRET or not secrets.compare_digest(supplied, APP_SECRET):
        raise HTTPException(status_code=403, detail="Invalid APP_SECRET")
    return supplied
api/config.py ADDED
@@ -0,0 +1,563 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os
from dotenv import load_dotenv

# Pull APP_SECRET (and any other settings) from a local .env file if present.
load_dotenv()

# Upstream endpoint that requests are proxied to.
BASE_URL = "https://blackboxaichat.onrender.com"

# Browser-like headers shared by every outgoing request; individual calls
# layer their own Content-Type/Referer on top via the helpers below.
common_headers = {
    'accept': '*/*',
    'accept-language': 'en-US,en;q=0.9',
    'content-type': 'application/json',
    'origin': 'https://www.blackbox.ai',
    'priority': 'u=1, i',
    'sec-ch-ua': '"Google Chrome";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-model': '""',
    'sec-ch-ua-platform': '"Windows"',
    'sec-fetch-dest': 'empty',
    'sec-fetch-mode': 'cors',
    'sec-fetch-site': 'same-origin',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36',
}
22
# Header Configurations for Specific API Calls
def get_headers_api_chat(referer_url):
    """Headers for JSON chat API calls: the shared browser headers plus a Referer."""
    # NOTE(review): the result carries both 'content-type' (from common_headers)
    # and 'Content-Type' — kept as-is to preserve the exact wire behavior.
    headers = dict(common_headers)
    headers['Content-Type'] = 'application/json'
    headers['Referer'] = referer_url
    return headers
25
+
26
def get_headers_chat(chat_url, next_action, next_router_state_tree):
    """Headers for Next.js server-action chat calls (text/x-component responses).

    NOTE(review): `chat_url` is accepted but never used — kept for
    call-site compatibility.
    """
    headers = dict(common_headers)
    headers.update({
        'Accept': 'text/x-component',
        'Content-Type': 'text/plain;charset=UTF-8',
        'next-action': next_action,
        'next-router-state-tree': next_router_state_tree,
        'next-url': '/',
    })
    return headers
35
+
36
# Bearer token checked by api.auth.verify_app_secret; None when the env var is unset.
APP_SECRET = os.getenv("APP_SECRET")
37
+
38
# Catalog of models exposed by the gateway. Each entry is {"id", "name"};
# "name" differs from "id" where the upstream label differs.
# Fixes vs. previous revision: the "llama-3.1-405b" entry's name was missing the
# trailing "b", and "anthropic/claude-3.7-sonnet:thinking" /
# "x-ai/grok-vision-beta" were each listed twice — duplicates removed.
ALLOWED_MODELS = [
    {"id": "blackboxai", "name": "blackboxai"},
    {"id": "niansuh-t1", "name": "niansuh-t1"},
    {"id": "o3-mini", "name": "o3-mini"},
    {"id": "snapzionai", "name": "snapzionai"},
    {"id": "blackboxai-pro", "name": "blackboxai-pro"},
    {"id": "flux", "name": "flux"},
    {"id": "Llama-4-Maverick-17B-128E", "name": "Llama-4-Maverick-17B-128E"},
    {"id": "llama-3.1-8b", "name": "llama-3.1-8b"},
    {"id": "llama-3.1-70b", "name": "llama-3.1-70b"},
    {"id": "llama-3.1-405b", "name": "llama-3.1-405b"},  # name typo fixed ("llama-3.1-405")
    {"id": "gpt-4o", "name": "gpt-4o"},
    {"id": "chatgpt-4o-latest", "name": "chatgpt-4o-latest"},
    {"id": "x-ai/grok-vision-beta", "name": "x-ai/grok-vision-beta"},
    {"id": "x-ai/grok-beta", "name": "x-ai/grok-beta"},
    {"id": "anthropic/claude-3.7-sonnet:thinking", "name": "anthropic/claude-3.7-sonnet:thinking"},
    {"id": "gpt-4o-2024-05-13", "name": "gpt-4o"},
    {"id": "gemini-pro", "name": "Gemini-PRO"},
    {"id": "gemini-flash-2.0", "name": "gemini-flash-2.0"},
    {"id": "gemini-1.5-flash", "name": "gemini-flash-2.0"},
    {"id": "claude-sonnet-3.5", "name": "claude-sonnet-3.5"},
    {"id": "deepseek-r1", "name": "deepseek-r1"},
    {"id": "deepseek-v3", "name": "deepseek-v3"},
    {"id": "Mistral-Small-24B-Instruct-2501", "name": "Mistral-Small-24B-Instruct-2501"},
    {"id": "Meta-Llama-3.3-70B-Instruct-Turbo", "name": "Meta-Llama-3.3-70B-Instruct-Turbo"},
    {"id": "Mistral-7B-Instruct-v0.2", "name": "Mistral-7B-Instruct-v0.2"},
    {"id": "deepseek-llm-67b-chat", "name": "deepseek-llm-67b-chat"},
    {"id": "dbrx-instruct", "name": "dbrx-instruct"},
    {"id": "Meta-Llama-3.1-405B-Instruct-Turbo", "name": "Meta-Llama-3.1-405B-Instruct-Turbo"},
    {"id": "Qwen-QwQ-32B-Preview", "name": "Qwen-QwQ-32B-Preview"},
    {"id": "Nous-Hermes-2-Mixtral-8x7B-DPO", "name": "Nous-Hermes-2-Mixtral-8x7B-DPO"},
    # Language/tool agents — "name" is the upstream agent slug.
    {"id": "PythonAgent", "name": "python"},
    {"id": "JavaAgent", "name": "java"},
    {"id": "JavaScriptAgent", "name": "javascript"},
    {"id": "HTMLAgent", "name": "html"},
    {"id": "GoogleCloudAgent", "name": "googlecloud"},
    {"id": "AndroidDeveloper", "name": "androiddeveloper"},
    {"id": "SwiftDeveloper", "name": "swiftdeveloper"},
    {"id": "Next.jsAgent", "name": "next.js"},
    {"id": "MongoDBAgent", "name": "mongodb"},
    {"id": "PyTorchAgent", "name": "pytorch"},
    {"id": "ReactAgent", "name": "react"},
    {"id": "XcodeAgent", "name": "xcode"},
    {"id": "AngularJSAgent", "name": "angularjs"},
    {"id": "HerokuAgent", "name": "heroku"},
    {"id": "GodotAgent", "name": "godot"},
    {"id": "GoAgent", "name": "go"},
    {"id": "GitlabAgent", "name": "gitlab"},
    {"id": "GitAgent", "name": "git"},
    {"id": "RepoMap", "name": "repomap"},
    {"id": "gemini-1.5-pro-latest", "name": "gemini-flash-2.0"},
    {"id": "gemini-1.5-pro", "name": "gemini-flash-2.0"},
    {"id": "claude-3-5-sonnet-20240620", "name": "claude-3-5-sonnet"},
    {"id": "claude-3-5-sonnet", "name": "claude-3-5-sonnet"},
    {"id": "Niansuh", "name": "niansuh"},
    {"id": "o1-preview", "name": "o1-preview"},
    {"id": "Claude-sonnet-3.7", "name": "Claude-sonnet-3.7"},
    {"id": "claude-3-5-sonnet-x", "name": "claude-3-5-sonnet-x"},
    {"id": "gpt-3.5-turbo", "name": "gpt-3.5-turbo"},
    {"id": "gpt-3.5-turbo-202201", "name": "gpt-3.5-turbo-202201"},
    # Added New Agents
    {"id": "FlaskAgent", "name": "flask"},
    {"id": "FirebaseAgent", "name": "firebase"},
    {"id": "FastAPIAgent", "name": "fastapi"},
    {"id": "ErlangAgent", "name": "erlang"},
    {"id": "ElectronAgent", "name": "electron"},
    {"id": "DockerAgent", "name": "docker"},
    {"id": "DigitalOceanAgent", "name": "digitalocean"},
    {"id": "BitbucketAgent", "name": "bitbucket"},
    {"id": "AzureAgent", "name": "azure"},
    {"id": "FlutterAgent", "name": "flutter"},
    {"id": "YoutubeAgent", "name": "youtube"},
    {"id": "builderAgent", "name": "builder"},
    {"id": "TirexAi", "name": "TirexAi"},
    {"id": "o1", "name": "o1"},
]

# "Added New models": upstream ids exposed unchanged (name == id for all of them),
# so they are kept as a flat id list and expanded below.
_EXTRA_MODEL_IDS = [
    "openai/gpt-4.1", "x-ai/grok-3-beta", "anthropic/claude-3.7-sonnet",
    "google/gemini-2.5-pro-exp-03-25:free", "deepseek/deepseek-r1",
    "meta-llama/llama-4-maverick:free", "mistralai/mistral-large",
    "openai/chatgpt-4o-latest", "qwen/qwen-2.5-coder-32b-instruct",
    "openai/gpt-4.1-mini", "openai/gpt-4.1-nano",
    "anthropic/claude-3.7-sonnet:beta", "anthropic/claude-3.5-haiku:beta",
    "anthropic/claude-3.5-haiku", "anthropic/claude-3.5-haiku-20241022:beta",
    "anthropic/claude-3.5-haiku-20241022", "anthropic/claude-3.5-sonnet:beta",
    "anthropic/claude-3.5-sonnet", "x-ai/grok-3-mini-beta",
    "google/gemini-2.0-flash-lite-001", "meta-llama/llama-4-maverick",
    "meta-llama/llama-4-scout:free", "meta-llama/llama-4-scout",
    "nvidia/llama-3.1-nemotron-70b-instruct:free", "nvidia/llama-3.1-nemotron-70b-instruct",
    "x-ai/grok-2-vision-1212", "x-ai/grok-2-1212", "eleutherai/llemma_7b",
    "alfredpros/codellama-7b-instruct-solidity", "arliai/qwq-32b-arliai-rpr-v1:free",
    "agentica-org/deepcoder-14b-preview:free", "moonshotai/kimi-vl-a3b-thinking:free",
    "openrouter/optimus-alpha", "nvidia/llama-3.1-nemotron-nano-8b-v1:free",
    "nvidia/llama-3.3-nemotron-super-49b-v1:free", "nvidia/llama-3.1-nemotron-ultra-253b-v1:free",
    "tokyotech-llm/llama-3.1-swallow-8b-instruct-v0.3", "openrouter/quasar-alpha",
    "all-hands/openhands-lm-32b-v0.1", "mistral/ministral-8b",
    "deepseek/deepseek-v3-base:free", "scb10x/llama3.1-typhoon2-8b-instruct",
    "scb10x/llama3.1-typhoon2-70b-instruct", "allenai/molmo-7b-d:free",
    "bytedance-research/ui-tars-72b:free", "qwen/qwen2.5-vl-3b-instruct:free",
    "qwen/qwen2.5-vl-32b-instruct:free", "qwen/qwen2.5-vl-32b-instruct",
    "deepseek/deepseek-chat-v3-0324:free", "deepseek/deepseek-chat-v3-0324",
    "featherless/qwerky-72b:free", "openai/o1-pro",
    "mistralai/mistral-small-3.1-24b-instruct:free", "mistralai/mistral-small-3.1-24b-instruct",
    "open-r1/olympiccoder-7b:free", "open-r1/olympiccoder-32b:free",
    "steelskull/l3.3-electra-r1-70b", "allenai/olmo-2-0325-32b-instruct",
    "google/gemma-3-1b-it:free", "google/gemma-3-4b-it:free", "google/gemma-3-4b-it",
    "ai21/jamba-1.6-large", "ai21/jamba-1.6-mini",
    "google/gemma-3-12b-it:free", "google/gemma-3-12b-it", "cohere/command-a",
    "openai/gpt-4o-mini-search-preview", "openai/gpt-4o-search-preview",
    "rekaai/reka-flash-3:free", "thedrummer/anubis-pro-105b-v1",
    "latitudegames/wayfarer-large-70b-llama-3.3", "thedrummer/skyfall-36b-v2",
    "microsoft/phi-4-multimodal-instruct", "deepseek/deepseek-r1-zero:free",
    "qwen/qwq-32b:free", "qwen/qwq-32b", "qwen/qwen2.5-32b-instruct",
    "moonshotai/moonlight-16b-a3b-instruct:free",
    "nousresearch/deephermes-3-llama-3-8b-preview:free", "openai/gpt-4.5-preview",
    "mistralai/mistral-saba", "cognitivecomputations/dolphin3.0-r1-mistral-24b:free",
    "cognitivecomputations/dolphin3.0-mistral-24b:free", "meta-llama/llama-guard-3-8b",
    "openai/o3-mini-high", "allenai/llama-3.1-tulu-3-405b",
    "deepseek/deepseek-r1-distill-llama-8b", "google/gemini-2.0-flash-001",
    "qwen/qwen-vl-plus", "openai/gpt-4o-mini-2024-07-18", "openai/gpt-4o-mini",
    "qwen/qwen-2-7b-instruct", "google/gemini-2-27b-it", "alpindale/magnum-72b",
    "nousresearch/hermes-2-theta-llama-3-8b", "google/gemini-2-9b-it:free",
    "google/gemini-2-9b-it", "sao10k/l3-stheno-8b", "ai21/jamba-instruct",
    "01-ai/yi-large", "meta-llama/llama-3.1-405b", "nothingiisreal/mn-celeste-12b",
    "01-ai/yi-vision", "01-ai/yi-large-turbo", "google/gemini-pro-1.5-exp",
    "meta-llama/llama-3.1-70b-instruct", "meta-llama/llama-3.1-8b-instruct:free",
    "meta-llama/llama-3.1-8b-instruct", "meta-llama/llama-3.1-405b-instruct",
    "cognitivecomputations/dolphin-llama-3-70b", "mistralai/mistral-nemo:free",
    "mistralai/mistral-nemo", "mistralai/codestral-mamba",
]

ALLOWED_MODELS += [{"id": model_id, "name": model_id} for model_id in _EXTRA_MODEL_IDS]
235
+
236
# Maps client-requested model ids to the id actually sent upstream.
# Entries where the value differs from the key (aliases, renames) are listed
# explicitly; the large "Added new Models" section was all identity mappings,
# so it is generated from an id tuple below. This also removes the duplicate
# "anthropic/claude-3.7-sonnet:thinking" literal key present in the previous
# revision (duplicate dict keys silently override each other).
MODEL_MAPPING = {
    "blackboxai": "blackboxai",
    "anthropic/claude-3.7-sonnet:thinking": "anthropic/claude-3.7-sonnet:thinking",
    "niansuh-t1": "niansuh-t1",
    "Gemini-PRO": "Gemini-PRO",
    "blackboxai-pro": "blackboxai-pro",
    "gpt-4o": "gpt-4o",
    "gemini-flash-2.0": "gemini-flash-2.0",
    "chatgpt-4o-latest": "chatgpt-4o-latest",
    "o1": "o1",
    "Llama-4-Maverick-17B-128E": "Llama-4-Maverick-17B-128E",
    "gemini-1.5-flash": "gemini-flash-2.0",
    "deepseek-r1": "deepseek-r1",
    "deepseek-v3": "deepseek-v3",
    "Mistral-Small-24B-Instruct-2501": "Mistral-Small-24B-Instruct-2501",
    "Meta-Llama-3.3-70B-Instruct-Turbo": "Meta-Llama-3.3-70B-Instruct-Turbo",
    "Mistral-7B-Instruct-v0.2": "Mistral-7B-Instruct-v0.2",
    "deepseek-llm-67b-chat": "deepseek-llm-67b-chat",
    "dbrx-instruct": "dbrx-instruct",
    "Meta-Llama-3.1-405B-Instruct-Turbo": "Meta-Llama-3.1-405B-Instruct-Turbo",
    "Qwen-QwQ-32B-Preview": "Qwen-QwQ-32B-Preview",
    "Nous-Hermes-2-Mixtral-8x7B-DPO": "Nous-Hermes-2-Mixtral-8x7B-DPO",
    # Additional mappings (aliases to canonical upstream ids)
    "gemini-flash": "gemini-flash-2.0",
    "claude-sonnet-3.5": "Claude-Sonnet-3.5",
    "gemini-1.5-pro-latest": "gemini-flash-2.0",
    "gemini-1.5-pro": "gemini-flash-2.0",
    "claude-3-5-sonnet-20240620": "Claude-Sonnet-3.5",
    "Niansuh": "niansuh",
    "o1-preview": "o1-preview",
    "Claude-sonnet-3.7": "Claude-sonnet-3.7",
    "claude-3-5-sonnet-x": "claude-3-5-sonnet-x",
    "gpt-3.5-turbo": "gpt-3.5-turbo",
    "gpt-3.5-turbo-202201": "gpt-3.5-turbo-202201",
    # Added New Agents (agent id -> upstream agent slug)
    "FlaskAgent": "flask",
    "FirebaseAgent": "firebase",
    "FastAPIAgent": "fastapi",
    "ErlangAgent": "erlang",
    "ElectronAgent": "electron",
    "DockerAgent": "docker",
    "DigitalOceanAgent": "digitalocean",
    "BitbucketAgent": "bitbucket",
    "AzureAgent": "azure",
    "FlutterAgent": "flutter",
    "YoutubeAgent": "youtube",
    "builderAgent": "builder",
    "TirexAi": "TirexAi",
    "o3-mini": "o3-mini",
}

# Added new Models — every one of these maps to itself upstream.
_PASSTHROUGH_MODEL_IDS = (
    "openai/gpt-4.1", "x-ai/grok-3-beta", "anthropic/claude-3.7-sonnet",
    "google/gemini-2.5-pro-exp-03-25:free", "deepseek/deepseek-r1",
    "meta-llama/llama-4-maverick:free", "mistralai/mistral-large",
    "openai/chatgpt-4o-latest", "qwen/qwen-2.5-coder-32b-instruct",
    "openai/gpt-4.1-mini", "openai/gpt-4.1-nano",
    "anthropic/claude-3.7-sonnet:beta", "anthropic/claude-3.5-haiku:beta",
    "anthropic/claude-3.5-haiku", "anthropic/claude-3.5-haiku-20241022:beta",
    "anthropic/claude-3.5-haiku-20241022", "anthropic/claude-3.5-sonnet:beta",
    "anthropic/claude-3.5-sonnet", "x-ai/grok-3-mini-beta",
    "google/gemini-2.0-flash-lite-001", "meta-llama/llama-4-maverick",
    "meta-llama/llama-4-scout:free", "meta-llama/llama-4-scout",
    "nvidia/llama-3.1-nemotron-70b-instruct:free", "nvidia/llama-3.1-nemotron-70b-instruct",
    "x-ai/grok-2-vision-1212", "x-ai/grok-2-1212", "eleutherai/llemma_7b",
    "alfredpros/codellama-7b-instruct-solidity", "arliai/qwq-32b-arliai-rpr-v1:free",
    "agentica-org/deepcoder-14b-preview:free", "moonshotai/kimi-vl-a3b-thinking:free",
    "openrouter/optimus-alpha", "nvidia/llama-3.1-nemotron-nano-8b-v1:free",
    "nvidia/llama-3.3-nemotron-super-49b-v1:free", "nvidia/llama-3.1-nemotron-ultra-253b-v1:free",
    "tokyotech-llm/llama-3.1-swallow-8b-instruct-v0.3", "openrouter/quasar-alpha",
    "all-hands/openhands-lm-32b-v0.1", "mistral/ministral-8b",
    "deepseek/deepseek-v3-base:free", "scb10x/llama3.1-typhoon2-8b-instruct",
    "scb10x/llama3.1-typhoon2-70b-instruct", "allenai/molmo-7b-d:free",
    "bytedance-research/ui-tars-72b:free", "qwen/qwen2.5-vl-3b-instruct:free",
    "qwen/qwen2.5-vl-32b-instruct:free", "qwen/qwen2.5-vl-32b-instruct",
    "deepseek/deepseek-chat-v3-0324:free", "deepseek/deepseek-chat-v3-0324",
    "featherless/qwerky-72b:free", "openai/o1-pro",
    "mistralai/mistral-small-3.1-24b-instruct:free", "mistralai/mistral-small-3.1-24b-instruct",
    "open-r1/olympiccoder-7b:free", "open-r1/olympiccoder-32b:free",
    "steelskull/l3.3-electra-r1-70b", "allenai/olmo-2-0325-32b-instruct",
    "google/gemma-3-1b-it:free", "google/gemma-3-4b-it:free", "google/gemma-3-4b-it",
    "ai21/jamba-1.6-large", "ai21/jamba-1.6-mini",
    "google/gemma-3-12b-it:free", "google/gemma-3-12b-it", "cohere/command-a",
    "openai/gpt-4o-mini-search-preview", "openai/gpt-4o-search-preview",
    "rekaai/reka-flash-3:free", "thedrummer/anubis-pro-105b-v1",
    "latitudegames/wayfarer-large-70b-llama-3.3", "thedrummer/skyfall-36b-v2",
    "microsoft/phi-4-multimodal-instruct", "deepseek/deepseek-r1-zero:free",
    "qwen/qwq-32b:free", "qwen/qwq-32b", "qwen/qwen2.5-32b-instruct",
    "moonshotai/moonlight-16b-a3b-instruct:free",
    "nousresearch/deephermes-3-llama-3-8b-preview:free", "openai/gpt-4.5-preview",
    "mistralai/mistral-saba", "cognitivecomputations/dolphin3.0-r1-mistral-24b:free",
    "cognitivecomputations/dolphin3.0-mistral-24b:free", "meta-llama/llama-guard-3-8b",
    "openai/o3-mini-high", "allenai/llama-3.1-tulu-3-405b",
    "deepseek/deepseek-r1-distill-llama-8b", "google/gemini-2.0-flash-001",
    "qwen/qwen-vl-plus", "openai/gpt-4o-mini-2024-07-18", "openai/gpt-4o-mini",
    "qwen/qwen-2-7b-instruct", "google/gemini-2-27b-it", "alpindale/magnum-72b",
    "nousresearch/hermes-2-theta-llama-3-8b", "google/gemini-2-9b-it:free",
    "google/gemini-2-9b-it", "sao10k/l3-stheno-8b", "ai21/jamba-instruct",
    "01-ai/yi-large", "meta-llama/llama-3.1-405b", "nothingiisreal/mn-celeste-12b",
    "01-ai/yi-vision", "01-ai/yi-large-turbo", "google/gemini-pro-1.5-exp",
    "meta-llama/llama-3.1-70b-instruct", "meta-llama/llama-3.1-8b-instruct:free",
    "meta-llama/llama-3.1-8b-instruct", "meta-llama/llama-3.1-405b-instruct",
    "cognitivecomputations/dolphin-llama-3-70b", "mistralai/mistral-nemo:free",
    "mistralai/mistral-nemo", "mistralai/codestral-mamba", "x-ai/grok-vision-beta",
)

MODEL_MAPPING.update({model_id: model_id for model_id in _PASSTHROUGH_MODEL_IDS})
407
+
408
# Agent modes
# Maps a public model name (as accepted by the API) to the Blackbox "agent
# mode" descriptor sent verbatim in the upstream request payload.
#   mode: always True for enabled agents.
#   id:   upstream agent identifier; several carry opaque suffixes
#         (e.g. "GPT-4oyTDtGrw") that appear to be upstream-assigned tokens.
#   name: upstream display name.
# Models absent from this table fall back to {} in api.utils.
AGENT_MODE = {
    'claude-sonnet-3.5': {'mode': True, 'id': "claude-sonnet-3.5", 'name': "claude-sonnet-3.5"},
    'flux': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "flux"},
    'gpt-4o': {'mode': True, 'id': "GPT-4o", 'name': "GPT-4o"},
    'Llama-4-Maverick-17B-128E': {'mode': True, 'id': "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8", 'name': "Llama-4-Maverick-17B-128E"},
    'o3-mini': {'mode': True, 'id': "o3-mini", 'name': "o3-mini"},
    'o1': {'mode': True, 'id': "o1", 'name': "o1"},
    'gemini-flash-2.0': {'mode': True, 'id': "Gemini/Gemini-Flash-2.0", 'name': "gemini-flash-2.0"},
    'Claude-Sonnet-3.7': {'mode': True, 'id': "Claude-Sonnet-3.7", 'name': "Claude-Sonnet-3.7"},
    'niansuh-t1': {'mode': True, 'id': "Niansuh-T1ybc2YUh", 'name': "niansuh-t1"},
    'chatgpt-4o-latest': {'mode': True, 'id': "GPT-4oyTDtGrw", 'name': "chatgpt-4o-latest"},
    'TirexAi': {'mode': True, 'id': "TierXxsRieQK", 'name': "TirexAi"},
    # NOTE(review): key is 'Niansuh' but name is lowercase "niansuh" — confirm intended.
    'Niansuh': {'mode': True, 'id': "NiansuhAIk1HgESy", 'name': "niansuh"},
    'snapzionai': {'mode': True, 'id': "Prompt-AnalyzerpINlz9W", 'name': "snapzionai"},
    'o1-preview': {'mode': True, 'id': "o1Dst8La8", 'name': "o1-preview"},
    'claude-3-5-sonnet-20241022': {'mode': True, 'id': "Claude-Sonnet-3.5zO2HZSF", 'name': "claude-3-5-sonnet-20241022"},
    'claude-3-5-sonnet-x': {'mode': True, 'id': "Claude-Sonnet-3.52022JE0UdQ3", 'name': "claude-3-5-sonnet-x"},
    'gpt-3.5-turbo': {'mode': True, 'id': "GPT-3.5-TurboYxtGz0H", 'name': "gpt-3.5-turbo"},
    'gpt-3.5-turbo-202201': {'mode': True, 'id': "GPT-3.5-Turbo-202201PNWREyV", 'name': "gpt-3.5-turbo-202201"},
    'deepseek-r1': {'mode': True, 'id': "deepseek-reasoner", 'name': "deepseek-r1"},
    'deepseek-v3': {'mode': True, 'id': "deepseek-chat", 'name': "deepseek-v3"},
    'Meta-Llama-3.3-70B-Instruct-Turbo': {'mode': True, 'id': "meta-llama/Llama-3.3-70B-Instruct-Turbo", 'name': "Meta-Llama-3.3-70B-Instruct-Turbo"},
    'Mistral-7B-Instruct-v0.2': {'mode': True, 'id': "mistralai/Mistral-7B-Instruct-v0.2", 'name': "Mistral-7B-Instruct-v0.2"},
    'Mistral-Small-24B-Instruct-2501': {'mode': True, 'id': "mistralai/Mistral-Small-24B-Instruct-2501", 'name': "Mistral-Small-24B-Instruct-2501"},
    'deepseek-llm-67b-chat': {'mode': True, 'id': "deepseek-ai/deepseek-llm-67b-chat", 'name': "deepseek-llm-67b-chat"},
    'dbrx-instruct': {'mode': True, 'id': "databricks/dbrx-instruct", 'name': "dbrx-instruct"},
    'Meta-Llama-3.1-405B-Instruct-Turbo': {'mode': True, 'id': "meta-llama/Meta-Llama-3.1-405B-Instruct-Lite-Pro", 'name': "Meta-Llama-3.1-405B-Instruct-Turbo"},
    'Qwen-QwQ-32B-Preview': {'mode': True, 'id': "Qwen/QwQ-32B-Preview", 'name': "Qwen-QwQ-32B-Preview"},
    'Nous-Hermes-2-Mixtral-8x7B-DPO': {'mode': True, 'id': "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", 'name': "Nous-Hermes-2-Mixtral-8x7B-DPO"},

    # Added New Models
    # For the OpenRouter-style entries below, key == id == name.
    'openai/gpt-4.1': {'mode': True, 'id': "openai/gpt-4.1", 'name': "openai/gpt-4.1"},
    'openai/gpt-4o-search-preview': {'mode': True, 'id': "openai/gpt-4o-search-preview", 'name': "openai/gpt-4o-search-preview"},
    'openai/gpt-4o-mini-search-preview': {'mode': True, 'id': "openai/gpt-4o-mini-search-preview", 'name': "openai/gpt-4o-mini-search-preview"},
    'x-ai/grok-3-beta': {'mode': True, 'id': "x-ai/grok-3-beta", 'name': "x-ai/grok-3-beta"},
    'anthropic/claude-3.7-sonnet': {'mode': True, 'id': "anthropic/claude-3.7-sonnet", 'name': "anthropic/claude-3.7-sonnet"},
    'google/gemini-2.5-pro-exp-03-25:free': {'mode': True, 'id': "google/gemini-2.5-pro-exp-03-25:free", 'name': "google/gemini-2.5-pro-exp-03-25:free"},
    'deepseek/deepseek-r1': {'mode': True, 'id': "deepseek/deepseek-r1", 'name': "deepseek/deepseek-r1"},
    'meta-llama/llama-4-maverick:free': {'mode': True, 'id': "meta-llama/llama-4-maverick:free", 'name': "meta-llama/llama-4-maverick:free"},
    'mistralai/mistral-large': {'mode': True, 'id': "mistralai/mistral-large", 'name': "mistralai/mistral-large"},
    'openai/chatgpt-4o-latest': {'mode': True, 'id': "openai/chatgpt-4o-latest", 'name': "openai/chatgpt-4o-latest"},
    'qwen/qwen-2.5-coder-32b-instruct': {'mode': True, 'id': "qwen/qwen-2.5-coder-32b-instruct", 'name': "qwen/qwen-2.5-coder-32b-instruct"},
    'openai/gpt-4.1-mini': {'mode': True, 'id': "openai/gpt-4.1-mini", 'name': "openai/gpt-4.1-mini"},
    'openai/gpt-4.1-nano': {'mode': True, 'id': "openai/gpt-4.1-nano", 'name': "openai/gpt-4.1-nano"},
    'anthropic/claude-3.7-sonnet:thinking': {'mode': True, 'id': "anthropic/claude-3.7-sonnet:thinking", 'name': "anthropic/claude-3.7-sonnet:thinking"},
    'anthropic/claude-3.7-sonnet:beta': {'mode': True, 'id': "anthropic/claude-3.7-sonnet:beta", 'name': "anthropic/claude-3.7-sonnet:beta"},
    'anthropic/claude-3.5-haiku:beta': {'mode': True, 'id': "anthropic/claude-3.5-haiku:beta", 'name': "anthropic/claude-3.5-haiku:beta"},
    'anthropic/claude-3.5-haiku': {'mode': True, 'id': "anthropic/claude-3.5-haiku", 'name': "anthropic/claude-3.5-haiku"},
    'anthropic/claude-3.5-haiku-20241022:beta': {'mode': True, 'id': "anthropic/claude-3.5-haiku-20241022:beta", 'name': "anthropic/claude-3.5-haiku-20241022:beta"},
    'anthropic/claude-3.5-haiku-20241022': {'mode': True, 'id': "anthropic/claude-3.5-haiku-20241022", 'name': "anthropic/claude-3.5-haiku-20241022"},
    'anthropic/claude-3.5-sonnet:beta': {'mode': True, 'id': "anthropic/claude-3.5-sonnet:beta", 'name': "anthropic/claude-3.5-sonnet:beta"},
    'anthropic/claude-3.5-sonnet': {'mode': True, 'id': "anthropic/claude-3.5-sonnet", 'name': "anthropic/claude-3.5-sonnet"},
    'x-ai/grok-3-mini-beta': {'mode': True, 'id': "x-ai/grok-3-mini-beta", 'name': "x-ai/grok-3-mini-beta"},
    'google/gemini-2.0-flash-lite-001': {'mode': True, 'id': "google/gemini-2.0-flash-lite-001", 'name': "google/gemini-2.0-flash-lite-001"},
    'meta-llama/llama-4-maverick': {'mode': True, 'id': "meta-llama/llama-4-maverick", 'name': "meta-llama/llama-4-maverick"},
    'meta-llama/llama-4-scout:free': {'mode': True, 'id': "meta-llama/llama-4-scout:free", 'name': "meta-llama/llama-4-scout:free"},
    'meta-llama/llama-4-scout': {'mode': True, 'id': "meta-llama/llama-4-scout", 'name': "meta-llama/llama-4-scout"},
    'nvidia/llama-3.1-nemotron-70b-instruct:free': {'mode': True, 'id': "nvidia/llama-3.1-nemotron-70b-instruct:free", 'name': "nvidia/llama-3.1-nemotron-70b-instruct:free"},
    'nvidia/llama-3.1-nemotron-70b-instruct': {'mode': True, 'id': "nvidia/llama-3.1-nemotron-70b-instruct", 'name': "nvidia/llama-3.1-nemotron-70b-instruct"},
    'x-ai/grok-2-vision-1212': {'mode': True, 'id': "x-ai/grok-2-vision-1212", 'name': "x-ai/grok-2-vision-1212"},
    'x-ai/grok-2-1212': {'mode': True, 'id': "x-ai/grok-2-1212", 'name': "x-ai/grok-2-1212"},
    'eleutherai/llemma_7b': {'mode': True, 'id': "eleutherai/llemma_7b", 'name': "eleutherai/llemma_7b"},
    'alfredpros/codellama-7b-instruct-solidity': {'mode': True, 'id': "alfredpros/codellama-7b-instruct-solidity", 'name': "alfredpros/codellama-7b-instruct-solidity"},
    'arliai/qwq-32b-arliai-rpr-v1:free': {'mode': True, 'id': "arliai/qwq-32b-arliai-rpr-v1:free", 'name': "arliai/qwq-32b-arliai-rpr-v1:free"},
    'agentica-org/deepcoder-14b-preview:free': {'mode': True, 'id': "agentica-org/deepcoder-14b-preview:free", 'name': "agentica-org/deepcoder-14b-preview:free"},
    'moonshotai/kimi-vl-a3b-thinking:free': {'mode': True, 'id': "moonshotai/kimi-vl-a3b-thinking:free", 'name': "moonshotai/kimi-vl-a3b-thinking:free"},
    'openrouter/optimus-alpha': {'mode': True, 'id': "openrouter/optimus-alpha", 'name': "openrouter/optimus-alpha"},
    'nvidia/llama-3.1-nemotron-nano-8b-v1:free': {'mode': True, 'id': "nvidia/llama-3.1-nemotron-nano-8b-v1:free", 'name': "nvidia/llama-3.1-nemotron-nano-8b-v1:free"},
}
483
+
484
# "Trending agent" descriptors: maps a public model/agent name to the id the
# upstream API expects in the `trendingAgentMode` payload field. An empty dict
# value (e.g. "blackboxai") means the upstream default agent is used.
TRENDING_AGENT_MODE = {
    "blackboxai": {},
    "gemini-1.5-flash": {'mode': True, 'id': 'gemini'},
    "llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
    'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
    # NOTE(review): id "llama-3.1-405" lacks the trailing "b" of its key —
    # looks like a typo, but confirm against the upstream API before changing.
    'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405"},
    'blackboxai-pro': {'mode': True, 'id': "blackboxai-pro"},
    'PythonAgent': {'mode': True, 'id': "python"},
    'JavaAgent': {'mode': True, 'id': "java"},
    'JavaScriptAgent': {'mode': True, 'id': "javascript"},
    'HTMLAgent': {'mode': True, 'id': "html"},
    'GoogleCloudAgent': {'mode': True, 'id': "googlecloud"},
    'AndroidDeveloper': {'mode': True, 'id': "android"},
    'SwiftDeveloper': {'mode': True, 'id': "swift"},
    'Next.jsAgent': {'mode': True, 'id': "next.js"},
    'MongoDBAgent': {'mode': True, 'id': "mongodb"},
    'PyTorchAgent': {'mode': True, 'id': "pytorch"},
    'ReactAgent': {'mode': True, 'id': "react"},
    'XcodeAgent': {'mode': True, 'id': "xcode"},
    'AngularJSAgent': {'mode': True, 'id': "angularjs"},
    'HerokuAgent': {'mode': True, 'id': "heroku"},
    'GodotAgent': {'mode': True, 'id': "godot"},
    'GoAgent': {'mode': True, 'id': "go"},
    'GitlabAgent': {'mode': True, 'id': "gitlab"},
    'GitAgent': {'mode': True, 'id': "git"},
    'RepoMap': {'mode': True, 'id': "repomap"},
    'FlaskAgent': {'mode': True, 'id': "flask"},
    'FirebaseAgent': {'mode': True, 'id': "firebase"},
    'FastAPIAgent': {'mode': True, 'id': "fastapi"},
    'ErlangAgent': {'mode': True, 'id': "erlang"},
    'ElectronAgent': {'mode': True, 'id': "electron"},
    'DockerAgent': {'mode': True, 'id': "docker"},
    'DigitalOceanAgent': {'mode': True, 'id': "digitalocean"},
    'BitbucketAgent': {'mode': True, 'id': "bitbucket"},
    'AzureAgent': {'mode': True, 'id': "azure"},
    'FlutterAgent': {'mode': True, 'id': "flutter"},
    'YoutubeAgent': {'mode': True, 'id': "youtube"},
    'builderAgent': {'mode': True, 'id': "builder"},
}
523
+
524
# Model prefixes
# Per-agent "@tag" prepended to the user prompt text before it is sent
# upstream (applied in api.utils.message_to_dict and stripped back out of
# responses by strip_model_prefix). Models without an entry get "".
MODEL_PREFIXES = {
    'PythonAgent': '@python',
    'JavaAgent': '@java',
    'JavaScriptAgent': '@javascript',
    'HTMLAgent': '@html',
    'GoogleCloudAgent': '@googlecloud',
    'AndroidDeveloper': '@android',
    'SwiftDeveloper': '@swift',
    'Next.jsAgent': '@next.js',
    'MongoDBAgent': '@mongodb',
    'PyTorchAgent': '@pytorch',
    'ReactAgent': '@react',
    'XcodeAgent': '@xcode',
    'AngularJSAgent': '@angularjs',
    'HerokuAgent': '@heroku',
    'GodotAgent': '@godot',
    'GoAgent': '@go',
    'GitlabAgent': '@gitlab',
    'GitAgent': '@git',
    'blackboxai-pro': '@blackboxai-pro',
    'FlaskAgent': '@flask',
    'FirebaseAgent': '@firebase',
    'FastAPIAgent': '@fastapi',
    'ErlangAgent': '@erlang',
    'ElectronAgent': '@electron',
    'DockerAgent': '@docker',
    'DigitalOceanAgent': '@digitalocean',
    'BitbucketAgent': '@bitbucket',
    'AzureAgent': '@azure',
    'FlutterAgent': '@flutter',
    'YoutubeAgent': '@youtube',
    'builderAgent': '@builder',
}
558
+
559
# Model referers
# Query-string suffixes used to build per-model Referer URLs for the two
# models that require one; all other models have no referer entry.
MODEL_REFERERS = {
    "blackboxai": "/?model=blackboxai",
    "blackboxai-pro": "/?model=blackboxai-pro",
}
api/logger.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+
3
+ # Setup logger with a consistent format
4
def setup_logger(name):
    """Return a logger configured exactly once with a stderr console handler.

    Repeated calls with the same name return the already-configured logger
    without attaching duplicate handlers.
    """
    log = logging.getLogger(name)
    if log.handlers:
        # Already configured earlier in this process — hand it back untouched.
        return log

    log.setLevel(logging.INFO)
    fmt = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    console = logging.StreamHandler()
    console.setFormatter(fmt)
    log.addHandler(console)

    return log

logger = setup_logger(__name__)
24
+
25
+ # Log functions to structure specific logs in utils.py
26
def log_generated_chat_id_with_referer(chat_id, model, referer_url):
    """
    Log the generated Chat ID with model and referer URL if it exists.
    """
    # Lazy %-style args: formatting is deferred until the record is emitted.
    logger.info("Generated Chat ID: %s - Model: %s - URL: %s", chat_id, model, referer_url)
31
+
32
def log_model_delay(delay_seconds, model, chat_id):
    """
    Log the delay introduced for specific models.
    """
    # Lazy %-style args: formatting is deferred until the record is emitted.
    logger.info("Introducing a delay of %s seconds for model '%s' (Chat ID: %s)", delay_seconds, model, chat_id)
37
+
38
def log_http_error(error, chat_id):
    """
    Log HTTP errors encountered during requests.
    """
    # Lazy %-style args: formatting is deferred until the record is emitted.
    logger.error("HTTP error occurred for Chat ID %s: %s", chat_id, error)
43
+
44
def log_request_error(error, chat_id):
    """
    Log request errors unrelated to HTTP status.
    """
    # Lazy %-style args: formatting is deferred until the record is emitted.
    logger.error("Request error occurred for Chat ID %s: %s", chat_id, error)
49
+
50
def log_strip_prefix(model_prefix, content):
    """
    Log when a model prefix is stripped from the content.

    `content` is accepted for interface compatibility but not logged — the
    message only names the prefix, matching the original behavior.
    """
    # Lazy %-style args: formatting is deferred until the record is emitted.
    logger.debug("Stripping prefix '%s' from content.", model_prefix)
api/models.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, Optional
2
+ from pydantic import BaseModel
3
+
4
class Message(BaseModel):
    """A single chat message in an OpenAI-style conversation."""

    # Sender role, e.g. "system", "user", or "assistant".
    role: str
    # Plain text, or a multimodal list of parts such as
    # {"type": "text", "text": ...} / {"type": "image_url", "image_url": {...}}
    # (api.utils iterates list content looking for those two part types).
    content: str | list
7
+
8
class ChatRequest(BaseModel):
    """OpenAI-compatible /chat/completions request body.

    Mirrors the subset of the OpenAI Chat Completions schema this proxy
    supports; unknown fields from clients are handled per pydantic defaults.
    """

    model: str
    messages: List[Message]
    stream: Optional[bool] = False
    temperature: Optional[float] = 0.5
    top_p: Optional[float] = 0.9
    max_tokens: Optional[int] = 1024
    # Bug fix: api/utils.py reads `request.tools` when building the upstream
    # payload, but this model previously had no such field, so every request
    # raised AttributeError. Optional with a None default keeps old clients
    # fully compatible.
    tools: Optional[List[dict]] = None
api/routes.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ from fastapi import APIRouter, Depends, HTTPException, Request, Response
3
+ from fastapi.responses import StreamingResponse
4
+ from api.auth import verify_app_secret
5
+ from api.config import ALLOWED_MODELS
6
+ from api.models import ChatRequest
7
+ from api.utils import process_non_streaming_response, process_streaming_response
8
+ from api.logger import setup_logger
9
+
10
+ logger = setup_logger(__name__)
11
+
12
+ router = APIRouter()
13
+
14
@router.options("/us/v1/chat/completions")
@router.options("/us/api/v1/chat/completions")
async def chat_completions_options():
    """Answer CORS preflight requests for the chat-completions endpoints."""
    cors_headers = {
        "Access-Control-Allow-Origin": "*",
        "Access-Control-Allow-Methods": "POST, OPTIONS",
        "Access-Control-Allow-Headers": "Content-Type, Authorization",
    }
    return Response(status_code=200, headers=cors_headers)
25
+
26
@router.get("/us/v1/models")
@router.get("/us/api/v1/models")
async def list_models():
    """Expose the catalogue of permitted models in OpenAI list format."""
    return {"object": "list", "data": ALLOWED_MODELS}
30
+
31
@router.post("/us/v1/chat/completions")
@router.post("/us/api/v1/chat/completions")
async def chat_completions(
    request: ChatRequest, app_secret: str = Depends(verify_app_secret)
):
    """Validate the requested model, then dispatch to the streaming or
    blocking completion handler."""
    logger.info("Entering chat_completions route")
    logger.info(f"Processing chat completion request for model: {request.model}")

    # Reject models that are not in the published catalogue.
    allowed_ids = [model["id"] for model in ALLOWED_MODELS]
    if request.model not in allowed_ids:
        raise HTTPException(
            status_code=400,
            detail=f"Model {request.model} is not allowed. Allowed models are: {', '.join(model['id'] for model in ALLOWED_MODELS)}",
        )

    if not request.stream:
        logger.info("Non-streaming response")
        return await process_non_streaming_response(request)

    logger.info("Streaming response")
    return StreamingResponse(process_streaming_response(request), media_type="text/event-stream")
51
+
52
# NOTE(review): `@router.route` is the deprecated Starlette-style decorator
# (registers all HTTP methods, handler receives a raw Request); mixing it
# with the FastAPI-style `@router.get` below works but is inconsistent —
# consider migrating all paths to `@router.get` after confirming no non-GET
# callers rely on these routes.
@router.route('/us/')
@router.route('/us/healthz')
@router.route('/us/ready')
@router.route('/us/alive')
@router.route('/us/status')
@router.get("/us/health")
def health_check(request: Request):
    """Health/liveness probe endpoint shared by several aliased paths.

    Returns a small JSON greeting with HTTP 421 (Misdirected Request) —
    the non-200 status appears deliberate (see inline comment), presumably
    to discourage generic probes; confirm before "fixing" it to 200.
    """
    return Response(
        content=json.dumps({
            "message": "Welcome to the NiansuhAI API!",
            "inspiration": "Failure is the first step to success."
        }),
        media_type="application/json",
        status_code=421  # Changing the status code to 421
    )
api/utils.py ADDED
@@ -0,0 +1,533 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import asyncio
import hashlib
import json
import os
import platform
import random
import re
import string
import time
import uuid
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional

import boto3
import httpx
import tiktoken
from fastapi import HTTPException

from api.config import (
    MODEL_MAPPING,
    get_headers_api_chat,
    get_headers_chat,
    BASE_URL,
    AGENT_MODE,
    TRENDING_AGENT_MODE,
    MODEL_PREFIXES
)
from api.logger import setup_logger
from api.models import ChatRequest
from api.validate import getHid  # Import the asynchronous getHid function
30
+
31
+ logger = setup_logger(__name__)
32
+
33
# ---------------------------------------------
# CLOUDFLARE R2 CONFIGURATION
# ---------------------------------------------
# SECURITY: these credentials were hard-coded in the original upload. They are
# now read from the environment first; the literal values remain only as
# backward-compatible fallbacks and should be rotated and removed — anyone
# with repo access can read them.
R2_ACCESS_KEY_ID = os.getenv("R2_ACCESS_KEY_ID", "df9c9eb87e850a8eb27afd3968077b42")
R2_SECRET_ACCESS_KEY = os.getenv(
    "R2_SECRET_ACCESS_KEY",
    "14b08b0855263bb63d2618da3a6537e1b0446d89d51da03a568620b1e5342ea8",
)
R2_ENDPOINT_URL = os.getenv(
    "R2_ENDPOINT_URL",
    "https://f2f92ac53fae792c4155f6e93a514989.r2.cloudflarestorage.com",
)
R2_BUCKET_NAME = os.getenv("R2_BUCKET_NAME", "snapzion")

# We always store replaced URLs in one file named snapzion.txt
R2_REPLACED_URLS_KEY = "snapzion.txt"

# Module-level S3-compatible client for Cloudflare R2 (created once at import).
s3 = boto3.client(
    "s3",
    endpoint_url=R2_ENDPOINT_URL,
    aws_access_key_id=R2_ACCESS_KEY_ID,
    aws_secret_access_key=R2_SECRET_ACCESS_KEY,
)

# Upstream watermark text that is scrubbed from streamed chunks.
BLOCKED_MESSAGE = (
    "Generated by BLACKBOX.AI, try unlimited chat https://www.blackbox.ai "
    "and for API requests replace https://www.blackbox.ai with https://api.blackbox.ai"
)
56
+
57
+ # ---------------------------------------------
58
+ # RANDOM USER-DATA GENERATION
59
+ # ---------------------------------------------
60
+ def get_random_name_email_customer():
61
+ """
62
+ Generate a random name, email, and customer ID.
63
+ The customer ID keeps the same length format as 'cus_Rldf7IKdNhdhiw'.
64
+ """
65
+ first_names = ["Alice", "Bob", "Carol", "David", "Evelyn", "Frank", "Grace", "Hector", "Ivy", "Jackie"]
66
+ last_names = ["Smith", "Johnson", "Davis", "Miller", "Thompson", "Garcia", "Brown", "Wilson", "Martin", "Clark"]
67
+
68
+ random_name = f"{random.choice(first_names)} {random.choice(last_names)}"
69
+ email_username = ''.join(random.choices(string.ascii_lowercase + string.digits, k=8))
70
+ random_email = f"{email_username}@gmail.com"
71
+ suffix_length = len("Rldf7IKdNhdhiw")
72
+ suffix_chars = string.ascii_letters + string.digits
73
+ random_suffix = ''.join(random.choice(suffix_chars) for _ in range(suffix_length))
74
+ random_customer_id = f"cus_{random_suffix}"
75
+
76
+ return random_name, random_email, random_customer_id
77
+
78
+ # ---------------------------------------------
79
+ # HELPER FUNCTIONS
80
+ # ---------------------------------------------
81
def generate_system_fingerprint() -> str:
    """Build a pseudo-unique OpenAI-style fingerprint: "fp_" + 12 hex chars.

    Seeds an MD5 digest with hostname, wall-clock time and a fresh UUID so
    successive calls differ.
    """
    seed = "-".join((platform.node(), str(time.time()), str(uuid.uuid4())))
    digest = hashlib.md5(seed.encode()).hexdigest()
    return "fp_" + digest[:12]
85
+
86
def get_last_user_prompt(messages: List[Any]) -> str:
    """Return the stripped text of the most recent user message, or "".

    For multimodal (list) content the first "text" part wins; a user message
    whose list content has no text part is skipped and the search continues
    with earlier messages.
    """
    for message in reversed(messages):
        if message.role != "user":
            continue
        body = message.content
        if isinstance(body, str):
            return body.strip()
        if isinstance(body, list):
            for part in body:
                if part.get("type") == "text":
                    return part.get("text", "").strip()
    return ""
96
+
97
+ def upload_replaced_urls_to_r2(urls: List[str], alt_text: str = "") -> None:
98
+ if not urls:
99
+ logger.info("No replaced or final Snapzion URLs to store. Skipping snapzion.txt update.")
100
+ return
101
+
102
+ existing_data = ""
103
+ try:
104
+ response = s3.get_object(Bucket=R2_BUCKET_NAME, Key=R2_REPLACED_URLS_KEY)
105
+ existing_data = response['Body'].read().decode('utf-8')
106
+ logger.info("Successfully read existing snapzion.txt from R2.")
107
+ except s3.exceptions.NoSuchKey:
108
+ logger.info("snapzion.txt does not exist yet. Will create a new one.")
109
+ except Exception as e:
110
+ logger.error(f"Error reading snapzion.txt from R2: {e}")
111
+
112
+ alt_text = alt_text.strip()
113
+ markdown_lines = [f"![{alt_text}]({url})" for url in urls]
114
+ to_append = "\n".join(markdown_lines)
115
+
116
+ if existing_data.strip():
117
+ updated_content = existing_data + "\n" + to_append
118
+ else:
119
+ updated_content = to_append
120
+
121
+ try:
122
+ s3.put_object(
123
+ Bucket=R2_BUCKET_NAME,
124
+ Key=R2_REPLACED_URLS_KEY,
125
+ Body=updated_content.encode("utf-8"),
126
+ ContentType="text/plain",
127
+ )
128
+ logger.info(f"Appended {len(urls)} new URLs to snapzion.txt in R2 (in Markdown format).")
129
+ except Exception as e:
130
+ logger.error(f"Failed to upload replaced URLs to R2: {e}")
131
+
132
def calculate_tokens(text: str, model: str) -> int:
    """Count tokens of `text` for `model` via tiktoken.

    Falls back to a whitespace word count when tiktoken does not recognise
    the model name (it raises KeyError in that case).
    """
    try:
        return len(tiktoken.encoding_for_model(model).encode(text))
    except KeyError:
        logger.warning(f"Model '{model}' not supported by tiktoken for token counting. Using a generic method.")
        return len(text.split())
140
+
141
def create_chat_completion_data(
    content: str,
    model: str,
    timestamp: int,
    request_id: str,
    system_fingerprint: str,
    prompt_tokens: int = 0,
    completion_tokens: int = 0,
    finish_reason: Optional[str] = None,
    function_call: Optional[Dict] = None,
) -> Dict[str, Any]:
    """Assemble one OpenAI-style `chat.completion.chunk` payload.

    When `function_call` is truthy the delta content is forced to None (per
    the OpenAI schema); token usage is attached only on the final "stop"
    chunk, otherwise `usage` is None.
    """
    delta = {
        "content": None if function_call else content,
        "role": "assistant",
        "function_call": function_call,
    }
    chunk: Dict[str, Any] = {
        "id": request_id,
        "object": "chat.completion.chunk",
        "created": timestamp,
        "model": model,
        "system_fingerprint": system_fingerprint,
        "choices": [{"index": 0, "delta": delta, "finish_reason": finish_reason}],
        "usage": None,
    }
    if finish_reason == "stop":
        chunk["usage"] = {
            "prompt_tokens": prompt_tokens,
            "completion_tokens": completion_tokens,
            "total_tokens": prompt_tokens + completion_tokens,
        }
    return chunk
176
+
177
def message_to_dict(message, model_prefix: Optional[str] = None, tools: Optional[List[Dict]] = None) -> Dict[str, Any]:
    """
    Convert a ChatRequest message to a dict for the upstream request payload.

    Text content (plain string or the first "text" part of multimodal list
    content) is stripped and optionally prefixed with `model_prefix`. Up to
    three image_url parts are collected into `data.imagesData`; the first one
    also populates `data.imageBase64`. `tools` is accepted for interface
    compatibility but unused here.

    Bug fix: the original appended extra image_url entries onto the string
    `content` via `setdefault(...).append(...)`, which raised AttributeError
    for any message with more than one image. All images already travel in
    `data.imagesData`, so that broken duplication is simply removed — the
    payload for 0- and 1-image messages is unchanged.
    """
    content = ""
    images_data = []

    if isinstance(message.content, list):
        for item in message.content:
            if item.get("type") == "text":
                content = item.get("text", "").strip()
            elif item.get("type") == "image_url" and len(images_data) < 3:
                image_url = item.get("image_url", {}).get("url", "")
                if image_url:
                    # Unique upstream file path (assumes .jpg; adjust if needed).
                    file_path = f"MultipleFiles/{uuid.uuid4().hex}.jpg"
                    images_data.append({"filePath": file_path, "contents": image_url})
    elif isinstance(message.content, str):
        content = message.content.strip()

    # Apply model prefix to non-empty text content only.
    if model_prefix and content:
        content = f"{model_prefix} {content}"

    if not images_data:
        return {"role": message.role, "content": content}

    return {
        "role": message.role,
        "content": content,
        "data": {
            "imageBase64": images_data[0]["contents"],
            "fileText": "",
            "title": "snapshot",
            "imagesData": images_data,
        },
    }
220
+
221
def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
    """Remove a leading model prefix (e.g. "@python") from content, if present."""
    if not model_prefix or not content.startswith(model_prefix):
        return content
    logger.debug(f"Stripping prefix '{model_prefix}' from content.")
    return content[len(model_prefix):].strip()
226
+
227
+ # ---------------------------------------------
228
+ # STREAMING RESPONSE HANDLER
229
+ # ---------------------------------------------
230
async def process_streaming_response(request: ChatRequest):
    """Stream a chat completion from the upstream Blackbox API as SSE lines.

    Async generator yielding OpenAI-style ``data: {...}\\n\\n`` chunks, a
    final "stop" chunk with token usage, and ``data: [DONE]``. Errors are
    reported in-band as a chunk with finish_reason "error" (non-standard —
    OpenAI clients expect "stop"/"length"/etc.) rather than raised, except
    for the missing h-value case which raises HTTPException up front.

    Any Google-Storage image URLs in the stream are rewritten to the
    cdn.snapzion.com mirror and recorded to R2 after the stream ends.
    """
    system_fingerprint = generate_system_fingerprint()
    random_name, random_email, random_customer_id = get_random_name_email_customer()

    request_id = f"chatcmpl-{uuid.uuid4()}"
    logger.info(f"Processing request (stream) {request_id} - Model: {request.model}")

    # Per-model upstream routing descriptors; {} / "" when the model has none.
    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")

    headers_api_chat = get_headers_api_chat(BASE_URL)

    # Random delay to mimic o1-preview's long "thinking" latency.
    if request.model == "o1-preview":
        delay_seconds = random.randint(1, 60)
        logger.info(f"Delay {delay_seconds}s for model 'o1-preview' (Request: {request_id})")
        await asyncio.sleep(delay_seconds)

    # Anti-bot validation token required by the upstream API.
    h_value = await getHid()
    if not h_value:
        logger.error("No h-value for validation.")
        raise HTTPException(status_code=500, detail="Missing h-value.")

    # NOTE(review): `request.tools` is read here but ChatRequest (api/models.py)
    # does not declare a `tools` field — confirm the model defines it, else
    # this raises AttributeError on every call.
    messages = [message_to_dict(msg, model_prefix=model_prefix, tools=request.tools) for msg in request.messages]

    # Upstream Blackbox chat payload, including a fabricated PREMIUM session.
    json_data = {
        "agentMode": agent_mode,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "codeInterpreterMode": False,
        "codeModelMode": True,
        "githubToken": "",
        "deepSearchMode": False,
        "domains": None,
        "id": request_id,
        "imageGenerationMode": False,
        "isChromeExt": False,
        "isMicMode": False,
        "isPremium": True,
        "isMemoryEnabled": False,
        "maxTokens": request.max_tokens,
        "messages": messages,
        "mobileClient": False,
        "playgroundTemperature": request.temperature,
        "playgroundTopP": request.top_p,
        "previewToken": None,
        "trendingAgentMode": trending_agent_mode,
        "userId": None,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
        "userSystemPrompt": None,
        "validated": h_value,
        "visitFromDelta": False,
        "webSearchModePrompt": False,
        "vscodeClient": False,
        "customProfile": {"name": "", "occupation": "", "traits": [], "additionalInfo": "", "enableNewChats": False},
        "webSearchModeOption": {"autoMode": False, "webMode": False, "offlineMode": True},
        "session": {
            "user": {"name": random_name, "email": random_email, "image": "https://lh3.googleusercontent.com/a/...=s96-c", "subscriptionStatus": "PREMIUM"},
            "expires": datetime.now(timezone.utc).isoformat(timespec='milliseconds').replace('+00:00', 'Z'),
            "subscriptionCache": {"customerId": random_customer_id, "status": "PREMIUM", "isTrialSubscription": "False", "expiryTimestamp": 1744652408, "lastChecked": int(time.time() * 1000)},
            "beastMode": False,
            "reasoningMode": False,
            "designerMode": False,
            "workspaceId": "",
        },
    }

    # Prompt token estimate: message text plus any inlined image contents.
    prompt_tokens = 0
    for msg in messages:
        if "content" in msg:
            prompt_tokens += calculate_tokens(msg["content"], request.model)
        if "data" in msg and "imagesData" in msg["data"]:
            for image_data in msg["data"]["imagesData"]:
                prompt_tokens += calculate_tokens(image_data["contents"], request.model)

    completion_tokens = 0
    final_snapzion_links = []

    async with httpx.AsyncClient() as client:
        try:
            async with client.stream("POST", f"{BASE_URL}/api/chat", headers=headers_api_chat, json=json_data, timeout=100) as response:
                response.raise_for_status()
                async for chunk in response.aiter_text():
                    timestamp = int(datetime.now().timestamp())
                    if not chunk:
                        continue
                    # Upstream sometimes prefixes a 21-char sentinel; drop it.
                    if chunk.startswith("$@$v=undefined-rv1$@$"):
                        chunk = chunk[21:]
                    # Scrub the upstream advertising watermark.
                    if BLOCKED_MESSAGE in chunk:
                        logger.info(f"Blocked message found in chunk (Request: {request_id}).")
                        chunk = chunk.replace(BLOCKED_MESSAGE, "").strip()
                        if not chunk:
                            continue
                    # Mirror Google Storage image links via the Snapzion CDN
                    # and remember them for the R2 upload below.
                    if "https://storage.googleapis.com" in chunk:
                        chunk = chunk.replace("https://storage.googleapis.com", "https://cdn.snapzion.com")
                    snapzion_urls = re.findall(r"(https://cdn\.snapzion\.com[^\s\)]+)", chunk)
                    if snapzion_urls:
                        final_snapzion_links.extend(snapzion_urls)
                    cleaned_content = strip_model_prefix(chunk, model_prefix)
                    completion_tokens += calculate_tokens(cleaned_content, request.model)

                    # Handle function call responses: a chunk that parses as
                    # JSON starting with "{" is treated as a function call
                    # (content must then be null per the OpenAI schema).
                    function_call = None
                    if cleaned_content and cleaned_content.startswith("{"):
                        try:
                            function_call = json.loads(cleaned_content)
                            cleaned_content = None  # Content must be null for function calls
                        except json.JSONDecodeError:
                            pass

                    yield "data: " + json.dumps(create_chat_completion_data(
                        cleaned_content,
                        request.model,
                        timestamp,
                        request_id,
                        system_fingerprint,
                        prompt_tokens,
                        completion_tokens,
                        finish_reason=None,
                        function_call=function_call
                    )) + "\n\n"
                # NOTE(review): `timestamp` is only bound inside the loop — an
                # upstream stream with zero chunks would raise NameError here.
                yield "data: " + json.dumps(create_chat_completion_data("", request.model, timestamp, request_id, system_fingerprint, prompt_tokens, completion_tokens, "stop")) + "\n\n"
                yield "data: [DONE]\n\n"
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error (stream) {request_id}: {e}")
            error_message = f"HTTP error occurred: {e}"
            try:
                error_details = e.response.json()
                error_message += f" Details: {error_details}"
            except ValueError:
                error_message += f" Response body: {e.response.text}"
            yield "data: " + json.dumps(create_chat_completion_data(error_message, request.model, int(datetime.now().timestamp()), request_id, system_fingerprint, prompt_tokens, completion_tokens, "error")) + "\n\n"
            yield "data: [DONE]\n\n"
        except httpx.RequestError as e:
            logger.error(f"Request error (stream) {request_id}: {e}")
            error_message = f"Request error occurred: {e}"
            yield "data: " + json.dumps(create_chat_completion_data(error_message, request.model, int(datetime.now().timestamp()), request_id, system_fingerprint, prompt_tokens, completion_tokens, "error")) + "\n\n"
            yield "data: [DONE]\n\n"
        except Exception as e:
            logger.error(f"Unhandled error (stream) {request_id}: {e}")
            error_message = f"An unexpected error occurred: {e}"
            yield "data: " + json.dumps(create_chat_completion_data(error_message, request.model, int(datetime.now().timestamp()), request_id, system_fingerprint, prompt_tokens, completion_tokens, "error")) + "\n\n"
            yield "data: [DONE]\n\n"

    # Best-effort: record any CDN links seen in this stream (runs even after
    # an in-band error, with whatever links were collected before it).
    last_user_prompt = get_last_user_prompt(request.messages)
    upload_replaced_urls_to_r2(final_snapzion_links, alt_text=last_user_prompt)
377
+
378
+ # ---------------------------------------------
379
+ # NON-STREAMING RESPONSE HANDLER
380
+ # ---------------------------------------------
381
async def process_non_streaming_response(request: ChatRequest):
    """Handle a non-streaming chat completion request.

    Converts the incoming OpenAI-style request into the upstream payload,
    POSTs it to ``{BASE_URL}/api/chat``, buffers the entire reply, scrubs the
    upstream version marker and any blocked-message text, rewrites Google
    Storage URLs to the Snapzion CDN, and returns an OpenAI-style
    ``chat.completion`` dict.

    Upstream failures do not raise to the caller: they are returned as a
    completion whose assistant message carries the error text and whose
    finish_reason is "error", so clients always receive a JSON body.

    Raises:
        HTTPException: 500 when the h-value is missing or the response is
            empty after blocked-message removal.
    """
    system_fingerprint = generate_system_fingerprint()
    random_name, random_email, random_customer_id = get_random_name_email_customer()

    request_id = f"chatcmpl-{uuid.uuid4()}"
    logger.info(f"Processing request (non-stream) {request_id} - Model: {request.model}")

    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")

    headers_api_chat = get_headers_api_chat(BASE_URL)
    # NOTE(review): headers_chat is never used below; kept in case
    # get_headers_chat has side effects — confirm it is pure, then drop.
    headers_chat = get_headers_chat(BASE_URL, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""]))

    # Artificial latency so 'o1-preview' mimics the upstream model's pace.
    if request.model == "o1-preview":
        delay_seconds = random.randint(20, 60)
        logger.info(f"Delay {delay_seconds}s for 'o1-preview' (Request: {request_id})")
        await asyncio.sleep(delay_seconds)

    # Hard-coded validation token. The falsy guard below is unreachable as
    # written; it is kept for when this reverts to a dynamic lookup
    # (see api/validate.py getHid).
    h_value = "00f37b34-a166-4efb-bce5-1312d87f2f94"
    if not h_value:
        logger.error("Failed to retrieve h-value.")
        raise HTTPException(status_code=500, detail="Missing h-value.")

    messages = [message_to_dict(msg, model_prefix=model_prefix, tools=request.tools) for msg in request.messages]

    json_data = {
        "agentMode": agent_mode,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "codeInterpreterMode": False,
        "codeModelMode": True,
        "githubToken": "",
        "deepSearchMode": False,
        "domains": None,
        "id": request_id,
        "imageGenerationMode": False,
        "isChromeExt": False,
        "isMicMode": False,
        "isPremium": True,
        "isMemoryEnabled": False,
        "maxTokens": request.max_tokens,
        "messages": messages,
        "mobileClient": False,
        "playgroundTemperature": request.temperature,
        "playgroundTopP": request.top_p,
        "previewToken": None,
        "trendingAgentMode": trending_agent_mode,
        "userId": None,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
        "userSystemPrompt": None,
        "validated": h_value,
        "visitFromDelta": False,
        "webSearchModePrompt": False,
        "vscodeClient": False,
        "customProfile": {"name": "", "occupation": "", "traits": [], "additionalInfo": "", "enableNewChats": False},
        "webSearchModeOption": {"autoMode": False, "webMode": False, "offlineMode": True},
        "session": {
            "user": {"name": random_name, "email": random_email, "image": "https://lh3.googleusercontent.com/a/...=s96-c", "subscriptionStatus": "PREMIUM"},
            "expires": datetime.now(timezone.utc).isoformat(timespec='milliseconds').replace('+00:00', 'Z'),
            "subscriptionCache": {"customerId": random_customer_id, "status": "PREMIUM", "isTrialSubscription": "False", "expiryTimestamp": 1744652408, "lastChecked": int(time.time() * 1000)},
            "beastMode": False,
            "reasoningMode": False,
            "designerMode": False,
            "workspaceId": "",
        },
    }

    # Prompt-token accounting: message text plus any inline image payloads.
    prompt_tokens = 0
    for msg in messages:
        if "content" in msg:
            prompt_tokens += calculate_tokens(msg["content"], request.model)
        if "data" in msg and "imagesData" in msg["data"]:
            for image_data in msg["data"]["imagesData"]:
                prompt_tokens += calculate_tokens(image_data["contents"], request.model)

    def _error_completion(error_message: str) -> dict:
        """Build the OpenAI-style error completion (deduplicates the three
        identical dicts previously repeated in each except handler)."""
        return {
            "id": request_id,
            "object": "chat.completion",
            "created": int(datetime.now().timestamp()),
            "model": request.model,
            "system_fingerprint": system_fingerprint,
            "choices": [{"index": 0, "message": {"role": "assistant", "content": error_message}, "finish_reason": "error"}],
            "usage": {"prompt_tokens": prompt_tokens, "completion_tokens": 0, "total_tokens": prompt_tokens},
        }

    full_response = ""
    final_snapzion_links = []

    async with httpx.AsyncClient() as client:
        try:
            async with client.stream("POST", f"{BASE_URL}/api/chat", headers=headers_api_chat, json=json_data) as response:
                response.raise_for_status()
                async for chunk in response.aiter_text():
                    full_response += chunk
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error (non-stream) {request_id}: {e}")
            error_message = f"HTTP error occurred: {e}"
            # Best-effort enrichment with the upstream body (JSON or raw text).
            try:
                error_details = e.response.json()
                error_message += f" Details: {error_details}"
            except ValueError:
                error_message += f" Response body: {e.response.text}"
            return _error_completion(error_message)
        except httpx.RequestError as e:
            logger.error(f"Request error (non-stream) {request_id}: {e}")
            return _error_completion(f"Request error occurred: {e}")
        except Exception as e:
            logger.error(f"Unexpected error (non-stream) {request_id}: {e}")
            return _error_completion(f"An unexpected error occurred: {e}")

    # Strip the upstream version marker; length is computed from the marker
    # itself instead of the former magic `[21:]` slice.
    marker = "$@$v=undefined-rv1$@$"
    if full_response.startswith(marker):
        full_response = full_response[len(marker):]
    if BLOCKED_MESSAGE in full_response:
        full_response = full_response.replace(BLOCKED_MESSAGE, "").strip()
        if not full_response:
            raise HTTPException(status_code=500, detail="Blocked message in response.")

    # Rewrite storage links to the CDN mirror before extracting them.
    if "https://storage.googleapis.com" in full_response:
        full_response = full_response.replace("https://storage.googleapis.com", "https://cdn.snapzion.com")

    snapzion_urls = re.findall(r"(https://cdn\.snapzion\.com[^\s\)]+)", full_response)
    final_snapzion_links.extend(snapzion_urls)

    cleaned_full_response = strip_model_prefix(full_response, model_prefix)
    completion_tokens = calculate_tokens(cleaned_full_response, request.model)

    # Mirror the CDN links to R2, labelled with the user's last prompt.
    last_user_prompt = get_last_user_prompt(request.messages)
    upload_replaced_urls_to_r2(final_snapzion_links, alt_text=last_user_prompt)

    return {
        "id": request_id,
        "object": "chat.completion",
        "created": int(datetime.now().timestamp()),
        "model": request.model,
        "system_fingerprint": system_fingerprint,
        "choices": [{"index": 0, "message": {"role": "assistant", "content": cleaned_full_response}, "finish_reason": "stop"}],
        "usage": {"prompt_tokens": prompt_tokens, "completion_tokens": completion_tokens, "total_tokens": prompt_tokens + completion_tokens},
    }
api/validate.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import re
import time
import asyncio
import aiohttp
from typing import Optional

# Upstream site whose JS bundles embed the rotating "h-value" UUID.
base_url = "https://blackboxaichat.onrender.com"
# Browser-like User-Agent so the scrape is served the normal frontend page.
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36',
}

# Cache variables (module-level, shared by every caller in this process;
# NOTE(review): not guarded by a lock — concurrent refreshes may race, which
# is harmless here since all writers store equally valid values)
cached_hid: Optional[str] = None  # last successfully scraped h-value
cache_time: float = 0  # epoch seconds when cached_hid was stored
CACHE_DURATION = 36000  # Cache duration in seconds (10 hours)
17
async def getHid(force_refresh: bool = False) -> Optional[str]:
    """Scrape the upstream frontend for its rotating "h-value" UUID.

    Fetches the site's HTML, locates its Next.js chunk files, and scans each
    one for a quoted UUID that appears in an assignment-like context. The
    result is cached in module globals for CACHE_DURATION seconds.

    Args:
        force_refresh: When True, bypass the cache and re-scrape.

    Returns:
        The validated UUID string, or None when the page or JS could not be
        fetched, no candidate was found, or a network error occurred.
    """
    global cached_hid, cache_time
    current_time = time.time()

    # Check if a forced refresh is needed or if the cached values are still valid.
    if not force_refresh and cached_hid and (current_time - cache_time) < CACHE_DURATION:
        print("Using cached_hid:", cached_hid)
        return cached_hid

    # Canonical 8-4-4-4-12 hex UUID wrapped in quotes, as it appears in JS source.
    uuid_format = r'["\']([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})["\']'

    def is_valid_context(text_around):
        # Heuristic: accept the UUID only if the surrounding text looks like a
        # variable assignment (some letter immediately followed by '=').
        return any(char + '=' in text_around for char in 'abcdefghijklmnopqrstuvwxyz')

    try:
        async with aiohttp.ClientSession(headers=headers) as session:
            async with session.get(base_url) as response:
                if response.status != 200:
                    print("Failed to load the page.")
                    return None

                page_content = await response.text()
                # Next.js chunk filenames referenced by the page.
                js_files = re.findall(r'static/chunks/\d{4}-[a-fA-F0-9]+\.js', page_content)

            # Scan each chunk until a plausibly-assigned UUID is found.
            for js_file in js_files:
                js_url = f"{base_url}/_next/{js_file}"
                async with session.get(js_url) as js_response:
                    if js_response.status == 200:
                        js_content = await js_response.text()
                        for match in re.finditer(uuid_format, js_content):
                            # Look at a 10-char window on each side of the match.
                            start = max(0, match.start() - 10)
                            end = min(len(js_content), match.end() + 10)
                            context = js_content[start:end]

                            if is_valid_context(context):
                                validated_value = match.group(1)
                                print("Found and validated h-value:", validated_value)
                                # Update the cache
                                cached_hid = validated_value
                                cache_time = current_time
                                return validated_value
            print("The h-value was not found in any JS content.")
            return None
    except Exception as e:
        # Broad catch: any network/parse failure degrades to "no h-value".
        print(f"An error occurred during the request: {e}")
        return None
+ return None
dockerignore ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ __pycache__
2
+ *.pyc
3
+ *.pyo
4
+ *.pyd
5
+ *.db
6
+ *.sqlite3
7
+ .env
8
+ .git
9
+ .gitignore
10
+ Dockerfile
gitattributes ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
12
+ *.mds filter=lfs diff=lfs merge=lfs -text
13
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
14
+ *.model filter=lfs diff=lfs merge=lfs -text
15
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
16
+ *.npy filter=lfs diff=lfs merge=lfs -text
17
+ *.npz filter=lfs diff=lfs merge=lfs -text
18
+ *.onnx filter=lfs diff=lfs merge=lfs -text
19
+ *.ot filter=lfs diff=lfs merge=lfs -text
20
+ *.parquet filter=lfs diff=lfs merge=lfs -text
21
+ *.pb filter=lfs diff=lfs merge=lfs -text
22
+ *.pickle filter=lfs diff=lfs merge=lfs -text
23
+ *.pkl filter=lfs diff=lfs merge=lfs -text
24
+ *.pt filter=lfs diff=lfs merge=lfs -text
25
+ *.pth filter=lfs diff=lfs merge=lfs -text
26
+ *.rar filter=lfs diff=lfs merge=lfs -text
27
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
28
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
29
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
30
+ *.tar filter=lfs diff=lfs merge=lfs -text
31
+ *.tflite filter=lfs diff=lfs merge=lfs -text
32
+ *.tgz filter=lfs diff=lfs merge=lfs -text
33
+ *.wasm filter=lfs diff=lfs merge=lfs -text
34
+ *.xz filter=lfs diff=lfs merge=lfs -text
35
+ *.zip filter=lfs diff=lfs merge=lfs -text
36
+ *.zst filter=lfs diff=lfs merge=lfs -text
37
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
38
+ # Audio files - uncompressed
39
+ *.pcm filter=lfs diff=lfs merge=lfs -text
40
+ *.sam filter=lfs diff=lfs merge=lfs -text
41
+ *.raw filter=lfs diff=lfs merge=lfs -text
42
+ # Audio files - compressed
43
+ *.aac filter=lfs diff=lfs merge=lfs -text
44
+ *.flac filter=lfs diff=lfs merge=lfs -text
45
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
46
+ *.ogg filter=lfs diff=lfs merge=lfs -text
47
+ *.wav filter=lfs diff=lfs merge=lfs -text
48
+ # Image files - uncompressed
49
+ *.bmp filter=lfs diff=lfs merge=lfs -text
50
+ *.gif filter=lfs diff=lfs merge=lfs -text
51
+ *.png filter=lfs diff=lfs merge=lfs -text
52
+ *.tiff filter=lfs diff=lfs merge=lfs -text
53
+ # Image files - compressed
54
+ *.jpg filter=lfs diff=lfs merge=lfs -text
55
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
56
+ *.webp filter=lfs diff=lfs merge=lfs -text
57
+ # Video files - compressed
58
+ *.mp4 filter=lfs diff=lfs merge=lfs -text
59
+ *.webm filter=lfs diff=lfs merge=lfs -text
main.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
import uvicorn

# `app` must stay importable at module level: the Dockerfile CMD starts the
# service with `uvicorn main:app`.
from api.app import app

if __name__ == "__main__":
    # Direct-execution (development) entrypoint; production runs via the
    # Dockerfile CMD with a worker count derived from `nproc`.
    uvicorn.run(app, host="0.0.0.0", port=8001)
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ fastapi==0.95.2
2
+ httpx==0.23.3
3
+ pydantic==1.10.4
4
+ python-dotenv==0.21.0
5
+ uvicorn==0.21.1
6
+ aiohttp
7
+ tiktoken
8
+ boto3