import os

from dotenv import load_dotenv

load_dotenv()

# Base URL and endpoint configuration
BASE_URL = "https://www.blackbox.ai"
API_ENDPOINT = f"{BASE_URL}/api/chat"


# Custom headers for requests
def get_headers():
    return {
        'accept': '*/*',
        'accept-language': 'en-US,en;q=0.9',
        'cache-control': 'no-cache',
        'content-type': 'application/json',
        'origin': BASE_URL,
        'pragma': 'no-cache',
        'priority': 'u=1, i',
        'referer': f'{BASE_URL}/',
        'sec-ch-ua': '"Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/130.0.0.0 Safari/537.36',
    }


# Model configurations
APP_SECRET = os.getenv("APP_SECRET")

ALLOWED_MODELS = [
    {"id": "blackboxai", "name": "blackboxai"},
    {"id": "blackboxai-pro", "name": "blackboxai-pro"},
    {"id": "flux", "name": "flux"},
    {"id": "llama-3.1-8b", "name": "llama-3.1-8b"},
    {"id": "llama-3.1-70b", "name": "llama-3.1-70b"},
    {"id": "llama-3.1-405b", "name": "llama-3.1-405b"},
    {"id": "gpt-4o", "name": "gpt-4o"},
    {"id": "gemini-pro", "name": "gemini-pro"},
    {"id": "gemini-1.5-flash", "name": "gemini-1.5-flash"},
    {"id": "claude-sonnet-3.5", "name": "claude-sonnet-3.5"},
    {"id": "PythonAgent", "name": "PythonAgent"},
    # Other models...
]

# Mapping of model IDs to display names for streamlined lookup
MODEL_MAPPING = {model['id']: model['name'] for model in ALLOWED_MODELS}

# Agent modes
AGENT_MODE = {
    'flux': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "flux"},
    'Niansuh': {'mode': True, 'id': "NiansuhAIk1HgESy", 'name': "Niansuh"},
    'o1-preview': {'mode': True, 'id': "o1Dst8La8", 'name': "o1-preview"},
}

TRENDING_AGENT_MODE = {
    "blackboxai": {},
    "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
    "llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
    'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
    # Other agent modes...
}
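

# --- Hypothetical usage sketch (illustration only, not part of the original module) ---
# A minimal example of how the pieces above might be combined into a single chat request.
# Assumptions: the `requests` library is installed, and the endpoint accepts a JSON body
# with `model`, `messages`, and agent-mode fields; the real payload schema expected by
# blackbox.ai is not documented here, so treat the field names below as guesses.
import requests


def send_chat_request(model_id: str, prompt: str) -> str:
    """Send a single user prompt to API_ENDPOINT and return the raw response body."""
    payload = {
        "model": MODEL_MAPPING.get(model_id, model_id),           # resolve display name
        "messages": [{"role": "user", "content": prompt}],        # assumed message format
        "agentMode": AGENT_MODE.get(model_id, {}),                # per-model agent settings
        "trendingAgentMode": TRENDING_AGENT_MODE.get(model_id, {}),
    }
    response = requests.post(API_ENDPOINT, headers=get_headers(), json=payload)
    response.raise_for_status()
    return response.text


# Example call (hypothetical):
# print(send_chat_request("gpt-4o", "Hello!"))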