Niansuh committed on
Commit 3b29c2b · verified · 1 Parent(s): d77c659

Delete api/providers

Files changed (1)
  1. api/providers/gizai.py +0 -152
api/providers/gizai.py DELETED
@@ -1,152 +0,0 @@
- from __future__ import annotations
-
- import json
- from aiohttp import ClientSession
-
- from ..typing import AsyncResult, Messages
- from ..image import ImageResponse
- from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
- from .helper import format_prompt
-
-
- class GizAI(AsyncGeneratorProvider, ProviderModelMixin):
-     url = "https://app.giz.ai/assistant/"
-     api_endpoint = "https://app.giz.ai/api/data/users/inferenceServer.infer"
-     working = True
-
-     supports_system_message = True
-     supports_message_history = True
-
-     # Chat models
-     default_model = 'chat-gemini-flash'
-     chat_models = [
-         default_model,
-         'chat-gemini-pro',
-         'chat-gpt4m',
-         'chat-gpt4',
-         'claude-sonnet',
-         'claude-haiku',
-         'llama-3-70b',
-         'llama-3-8b',
-         'mistral-large',
-         'chat-o1-mini'
-     ]
-
-     # Image models
-     image_models = [
-         'flux1',
-         'sdxl',
-         'sd',
-         'sd35',
-     ]
-
-     models = [*chat_models, *image_models]
-
-     model_aliases = {
-         # Chat model aliases
-         "gemini-flash": "chat-gemini-flash",
-         "gemini-pro": "chat-gemini-pro",
-         "gpt-4o-mini": "chat-gpt4m",
-         "gpt-4o": "chat-gpt4",
-         "claude-3.5-sonnet": "claude-sonnet",
-         "claude-3-haiku": "claude-haiku",
-         "llama-3.1-70b": "llama-3-70b",
-         "llama-3.1-8b": "llama-3-8b",
-         "o1-mini": "chat-o1-mini",
-         # Image model aliases
-         "sd-1.5": "sd",
-         "sd-3.5": "sd35",
-         "flux-schnell": "flux1",
-     }
-
-     @classmethod
-     def get_model(cls, model: str) -> str:
-         if model in cls.models:
-             return model
-         elif model in cls.model_aliases:
-             return cls.model_aliases[model]
-         else:
-             return cls.default_model
-
-     @classmethod
-     def is_image_model(cls, model: str) -> bool:
-         return model in cls.image_models
-
-     @classmethod
-     async def create_async_generator(
-         cls,
-         model: str,
-         messages: Messages,
-         proxy: str = None,
-         **kwargs
-     ) -> AsyncResult:
-         model = cls.get_model(model)
-
-         headers = {
-             'Accept': 'application/json, text/plain, */*',
-             'Accept-Language': 'en-US,en;q=0.9',
-             'Cache-Control': 'no-cache',
-             'Connection': 'keep-alive',
-             'Content-Type': 'application/json',
-             'Origin': 'https://app.giz.ai',
-             'Pragma': 'no-cache',
-             'Sec-Fetch-Dest': 'empty',
-             'Sec-Fetch-Mode': 'cors',
-             'Sec-Fetch-Site': 'same-origin',
-             'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
-             'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
-             'sec-ch-ua-mobile': '?0',
-             'sec-ch-ua-platform': '"Linux"'
-         }
-
-         async with ClientSession() as session:
-             if cls.is_image_model(model):
-                 # Image generation
-                 prompt = messages[-1]["content"]
-                 data = {
-                     "model": model,
-                     "input": {
-                         "width": "1024",
-                         "height": "1024",
-                         "steps": 4,
-                         "output_format": "webp",
-                         "batch_size": 1,
-                         "mode": "plan",
-                         "prompt": prompt
-                     }
-                 }
-                 async with session.post(
-                     cls.api_endpoint,
-                     headers=headers,
-                     data=json.dumps(data),
-                     proxy=proxy
-                 ) as response:
-                     response.raise_for_status()
-                     response_data = await response.json()
-                     if response_data.get('status') == 'completed' and response_data.get('output'):
-                         for url in response_data['output']:
-                             yield ImageResponse(images=url, alt="Generated Image")
-             else:
-                 # Chat completion
-                 data = {
-                     "model": model,
-                     "input": {
-                         "messages": [
-                             {
-                                 "type": "human",
-                                 "content": format_prompt(messages)
-                             }
-                         ],
-                         "mode": "plan"
-                     },
-                     "noStream": True
-                 }
-                 async with session.post(
-                     cls.api_endpoint,
-                     headers=headers,
-                     data=json.dumps(data),
-                     proxy=proxy
-                 ) as response:
-                     response.raise_for_status()
-                     result = await response.json()
-                     yield result.get('output', '')
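For reference, the removed class followed the usual AsyncGeneratorProvider contract: create_async_generator() returns an async generator that yields text chunks for chat models and ImageResponse objects for image models. A minimal sketch of how it would have been driven before this deletion, assuming the api.providers package layout implied by the file path and the common role/content Messages shape (the import path and message literal are illustrative, not taken from this commit):

import asyncio

# Hypothetical import path; this commit removes the module.
from api.providers.gizai import GizAI

async def main():
    # Assumed Messages shape: a list of {"role": ..., "content": ...} dicts.
    messages = [{"role": "user", "content": "Hello"}]
    # "gemini-flash" resolves to "chat-gemini-flash" via model_aliases.
    async for chunk in GizAI.create_async_generator("gemini-flash", messages):
        print(chunk)

asyncio.run(main())

Because the method is declared with async def and contains yield, calling it returns the async generator directly, so it is iterated with async for rather than awaited first.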