# api/config.py
import os
from dotenv import load_dotenv
load_dotenv()
# Base URL and Common Headers for GizAI
BASE_URL = "https://app.giz.ai/assistant/"
common_headers = {
    'Accept': 'application/json, text/plain, */*',
    'Accept-Language': 'en-US,en;q=0.9',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Content-Type': 'application/json',
    'Origin': 'https://app.giz.ai',
    'Pragma': 'no-cache',
    'Sec-Fetch-Dest': 'empty',
    'Sec-Fetch-Mode': 'cors',
    'Sec-Fetch-Site': 'same-origin',
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
    'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"Linux"'
}
# Header Configurations for GizAI API Calls
def get_headers_api_chat(referer_url):
    return {**common_headers, 'Referer': referer_url}


def get_headers_chat(chat_url, next_action, next_router_state_tree):
    return {
        **common_headers,
        'Accept': 'text/x-component',
        'Content-Type': 'text/plain;charset=UTF-8',
        'Referer': chat_url,
        'next-action': next_action,
        'next-router-state-tree': next_router_state_tree,
        'next-url': '/',
    }
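
# Illustrative sketch (not part of the original config): shows the intended
# call pattern for the two header builders above. The next-action and
# router-state values below are placeholders, not real GizAI tokens.
def _example_headers():
    api_headers = get_headers_api_chat(referer_url=BASE_URL)
    chat_headers = get_headers_chat(
        chat_url=BASE_URL,
        next_action="placeholder-next-action",
        next_router_state_tree="placeholder-state-tree",
    )
    # get_headers_chat overrides the Accept and Content-Type keys from common_headers.
    return api_headers, chat_headers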
# Application Secret for Authentication
APP_SECRET = os.getenv("APP_SECRET")
# Default Model
default_model = 'chat-gemini-flash'
# Supported Chat Models
chat_models = [
    'chat-gemini-flash',
    'chat-gemini-pro',
    'chat-gpt4m',
    'chat-gpt4',
    'claude-sonnet',
    'claude-haiku',
    'llama-3-70b',
    'llama-3-8b',
    'mistral-large',
    'chat-o1-mini'
]
# Supported Image Models
image_models = [
    'flux1',
    'sdxl',
    'sd',
    'sd35',
]
# Combined Models List
models = [*chat_models, *image_models]
# Model Aliases
model_aliases = {
    # Chat model aliases
    "gemini-flash": "chat-gemini-flash",
    "gemini-pro": "chat-gemini-pro",
    "gpt-4o-mini": "chat-gpt4m",
    "gpt-4o": "chat-gpt4",
    "claude-3.5-sonnet": "claude-sonnet",
    "claude-3-haiku": "claude-haiku",
    "llama-3.1-70b": "llama-3-70b",
    "llama-3.1-8b": "llama-3-8b",
    "o1-mini": "chat-o1-mini",
    # Image model aliases
    "sd-1.5": "sd",
    "sd-3.5": "sd35",
    "flux-schnell": "flux1",
}
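
# Hypothetical helper (not in the original file): resolves a user-facing alias
# to its canonical model id, falling back to default_model for unknown names.
# Sketch only; the serving layer may already handle this elsewhere.
def resolve_model(name: str) -> str:
    if name in models:
        return name
    return model_aliases.get(name, default_model)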
# Allowed Models with ID and Name
ALLOWED_MODELS = [
    {"id": "chat-gemini-flash", "name": "Chat Gemini Flash"},
    {"id": "chat-gemini-pro", "name": "Chat Gemini Pro"},
    {"id": "chat-gpt4m", "name": "Chat GPT-4m"},
    {"id": "chat-gpt4", "name": "Chat GPT-4"},
    {"id": "claude-sonnet", "name": "Claude Sonnet"},
    {"id": "claude-haiku", "name": "Claude Haiku"},
    {"id": "llama-3-70b", "name": "LLaMA 3-70B"},
    {"id": "llama-3-8b", "name": "LLaMA 3-8B"},
    {"id": "mistral-large", "name": "Mistral Large"},
    {"id": "chat-o1-mini", "name": "Chat O1 Mini"},
    {"id": "flux1", "name": "Flux1"},
    {"id": "sdxl", "name": "SDXL"},
    {"id": "sd", "name": "SD"},
    {"id": "sd35", "name": "SD35"},
]
# Mapping from model IDs to their canonical names
MODEL_MAPPING = {
    "chat-gemini-flash": "chat-gemini-flash",
    "chat-gemini-pro": "chat-gemini-pro",
    "chat-gpt4m": "chat-gpt4m",
    "chat-gpt4": "chat-gpt4",
    "claude-sonnet": "claude-sonnet",
    "claude-haiku": "claude-haiku",
    "llama-3-70b": "llama-3-70b",
    "llama-3-8b": "llama-3-8b",
    "mistral-large": "mistral-large",
    "chat-o1-mini": "chat-o1-mini",
    "flux1": "flux1",
    "sdxl": "sdxl",
    "sd": "sd",
    "sd35": "sd35",
}
# Removed MODEL_PREFIXES, MODEL_REFERERS, and AGENT_MODE as per user instruction
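
# Optional sanity-check sketch (not in the original file): run this module
# directly to verify that the static model tables above stay in sync.
if __name__ == "__main__":
    allowed_ids = {entry["id"] for entry in ALLOWED_MODELS}
    assert allowed_ids == set(models), "ALLOWED_MODELS out of sync with models"
    assert set(MODEL_MAPPING) == set(models), "MODEL_MAPPING out of sync with models"
    assert all(target in models for target in model_aliases.values()), \
        "model_aliases targets an unknown model id"
    print("Model tables are consistent.")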