import os
from dotenv import load_dotenv

load_dotenv()

APP_SECRET = os.getenv("APP_SECRET")

# GizAI Base URL and API Endpoint
GIZAI_BASE_URL = "https://app.giz.ai"
GIZAI_API_ENDPOINT = f"{GIZAI_BASE_URL}/api/data/users/inferenceServer.infer"

# Allowed models
ALLOWED_MODELS = [
    {"id": "chat-gemini-flash", "name": "chat-gemini-flash"},
    {"id": "chat-gemini-pro", "name": "chat-gemini-pro"},
    {"id": "chat-gpt4m", "name": "chat-gpt4m"},
    {"id": "chat-gpt4", "name": "chat-gpt4"},
    {"id": "claude-sonnet", "name": "claude-sonnet"},
    {"id": "claude-haiku", "name": "claude-haiku"},
    {"id": "llama-3-70b", "name": "llama-3-70b"},
    {"id": "llama-3-8b", "name": "llama-3-8b"},
    {"id": "mistral-large", "name": "mistral-large"},
    {"id": "chat-o1-mini", "name": "chat-o1-mini"},
    {"id": "flux1", "name": "flux1"},
    {"id": "sdxl", "name": "sdxl"},
    {"id": "sd", "name": "sd"},
    {"id": "sd35", "name": "sd35"},
]
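
# Hypothetical usage sketch (this helper is not part of the upstream file):
# a request handler could validate an incoming model id against ALLOWED_MODELS
# before forwarding anything to GizAI, e.g. like this.
def is_allowed_model(model_id: str) -> bool:
    """Return True if model_id matches one of the exposed model ids."""
    return any(model["id"] == model_id for model in ALLOWED_MODELS)
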

# Model to provider mapping
MODEL_PROVIDER_MAPPING = {
    # GizAI models
    "chat-gemini-flash": "gizai",
    "chat-gemini-pro": "gizai",
    "chat-gpt4m": "gizai",
    "chat-gpt4": "gizai",
    "claude-sonnet": "gizai",
    "claude-haiku": "gizai",
    "llama-3-70b": "gizai",
    "llama-3-8b": "gizai",
    "mistral-large": "gizai",
    "chat-o1-mini": "gizai",
    "flux1": "gizai",
    "sdxl": "gizai",
    "sd": "gizai",
    "sd35": "gizai",
    # Aliases
    "gemini-flash": "gizai",
    "gemini-pro": "gizai",
    "gpt-4o-mini": "gizai",
    "gpt-4o": "gizai",
    "claude-3.5-sonnet": "gizai",
    "claude-3-haiku": "gizai",
    "llama-3.1-70b": "gizai",
    "llama-3.1-8b": "gizai",
    "o1-mini": "gizai",
    "sd-1.5": "gizai",
    "sd-3.5": "gizai",
    "flux-schnell": "gizai",
}

# Map requested model names (canonical ids and aliases) to GizAI model ids
MODEL_MAPPING = {
    "chat-gemini-flash": "chat-gemini-flash",
    "chat-gemini-pro": "chat-gemini-pro",
    "chat-gpt4m": "chat-gpt4m",
    "chat-gpt4": "chat-gpt4",
    "claude-sonnet": "claude-sonnet",
    "claude-haiku": "claude-haiku",
    "llama-3-70b": "llama-3-70b",
    "llama-3-8b": "llama-3-8b",
    "mistral-large": "mistral-large",
    "chat-o1-mini": "chat-o1-mini",
    "flux1": "flux1",
    "sdxl": "sdxl",
    "sd": "sd",
    "sd35": "sd35",
    # Aliases
    "gemini-flash": "chat-gemini-flash",
    "gemini-pro": "chat-gemini-pro",
    "gpt-4o-mini": "chat-gpt4m",
    "gpt-4o": "chat-gpt4",
    "claude-3.5-sonnet": "claude-sonnet",
    "claude-3-haiku": "claude-haiku",
    "llama-3.1-70b": "llama-3-70b",
    "llama-3.1-8b": "llama-3-8b",
    "o1-mini": "chat-o1-mini",
    "sd-1.5": "sd",
    "sd-3.5": "sd35",
    "flux-schnell": "flux1",
}
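
# Hypothetical helper sketch (assumed name, not part of the upstream file):
# shows how the two mappings above could be combined to resolve a requested
# model name into the concrete GizAI model id and its provider.
def resolve_model(requested: str) -> tuple[str, str]:
    """Resolve a requested model name (canonical or alias) to (model_id, provider).

    Raises ValueError for names missing from MODEL_MAPPING.
    """
    model_id = MODEL_MAPPING.get(requested)
    if model_id is None:
        raise ValueError(f"Unsupported model: {requested}")
    provider = MODEL_PROVIDER_MAPPING.get(requested, "gizai")
    return model_id, provider


# Example:
#   resolve_model("gpt-4o-mini")  -> ("chat-gpt4m", "gizai")
#   resolve_model("sd-3.5")       -> ("sd35", "gizai")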