Niansuh committed on
Commit
efc46e8
·
verified ·
1 Parent(s): aaa9136

Rename api/provider/gizai.py to api/provider/amigochat.py

Browse files
Files changed (2) hide show
  1. api/provider/amigochat.py +265 -0
  2. api/provider/gizai.py +0 -186
api/provider/amigochat.py ADDED
@@ -0,0 +1,265 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import asyncio
import json
import uuid
from datetime import datetime

from aiohttp import ClientSession, ClientTimeout, ClientResponseError
from fastapi import HTTPException

from api.config import MODEL_MAPPING
from api.logger import setup_logger
8
+
9
+ logger = setup_logger(__name__)
10
+
11
# --- AmigoChat endpoints ---------------------------------------------------
AMIGOCHAT_URL = "https://amigochat.io/chat/"
CHAT_API_ENDPOINT = "https://api.amigochat.io/v1/chat/completions"
IMAGE_API_ENDPOINT = "https://api.amigochat.io/v1/images/generations"

# Chat-completion models exposed by AmigoChat.
AMIGOCHAT_CHAT_MODELS = [
    "gpt-4o",
    "gpt-4o-mini",
    "o1-preview",
    "o1-mini",
    "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
    "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
    "claude-3-sonnet-20240229",
    "gemini-1.5-pro",
]

# Image-generation models (no streaming support upstream).
AMIGOCHAT_IMAGE_MODELS = [
    "flux-pro/v1.1",
    "flux-realism",
    "flux-pro",
    "dalle-e-3",
]

# Every model this provider can serve.
AMIGOCHAT_MODELS = AMIGOCHAT_CHAT_MODELS + AMIGOCHAT_IMAGE_MODELS

# Short/public names mapped onto the ids AmigoChat actually expects.
AMIGOCHAT_MODEL_ALIASES = {
    "o1": "o1-preview",
    "llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
    "llama-3.2-90b": "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
    "claude-3.5-sonnet": "claude-3-sonnet-20240229",
    "gemini-pro": "gemini-1.5-pro",
    "dalle-3": "dalle-e-3",
}

# AmigoChat addresses each model through a "persona" id; this maps
# model id -> personaId sent in the request payload.
PERSONA_IDS = {
    "gpt-4o": "gpt",
    "gpt-4o-mini": "amigo",
    "o1-preview": "openai-o-one",
    "o1-mini": "openai-o-one-mini",
    "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo": "llama-three-point-one",
    "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo": "llama-3-2",
    "claude-3-sonnet-20240229": "claude",
    "gemini-1.5-pro": "gemini-1-5-pro",
    "flux-pro/v1.1": "flux-1-1-pro",
    "flux-realism": "flux-realism",
    "flux-pro": "flux-pro",
    "dalle-e-3": "dalle-three",
}
60
+
61
def get_amigochat_model(model: str) -> str:
    """Resolve an incoming model name to one AmigoChat accepts.

    The project-wide MODEL_MAPPING is applied first, then alias
    resolution; anything still unknown falls back to 'gpt-4o-mini'.
    """
    resolved = MODEL_MAPPING.get(model, model)
    if resolved in AMIGOCHAT_MODELS:
        return resolved
    # Unknown names either hit an alias or the default model.
    return AMIGOCHAT_MODEL_ALIASES.get(resolved, 'gpt-4o-mini')
70
+
71
def get_persona_id(model: str) -> str:
    """Return the AmigoChat persona id for *model* ('amigo' when unmapped)."""
    try:
        return PERSONA_IDS[model]
    except KeyError:
        return "amigo"
73
+
74
def is_image_model(model: str) -> bool:
    """True when *model* is one of AmigoChat's image-generation models."""
    return any(model == candidate for candidate in AMIGOCHAT_IMAGE_MODELS)
76
+
77
async def process_streaming_response(request_data):
    """Handle a streaming request against AmigoChat.

    Image models do not support streaming, so those requests are
    delegated to process_non_streaming_response and the single JSON
    payload is wrapped in an iterator.  Chat models return an async
    generator that yields OpenAI-style SSE chunks terminated by
    ``data: [DONE]``.

    Args:
        request_data: dict with at least 'model' and 'messages' keys
            (OpenAI chat-completion request shape).

    Raises:
        HTTPException(400): no messages supplied.
        HTTPException(upstream status): the API answered with an error
            before streaming started.
        HTTPException(500): any other failure while streaming.
    """
    model = get_amigochat_model(request_data.get('model'))
    messages = request_data.get('messages')
    if not messages:
        raise HTTPException(status_code=400, detail="Messages are required")

    if is_image_model(model):
        # Image generation does not support streaming; serve it
        # non-streaming and wrap the one response in an iterator.
        # (No HTTP session is needed here, so none is created.)
        response = await process_non_streaming_response(request_data)
        return iter([json.dumps(response)])

    device_uuid = str(uuid.uuid4())

    headers = {
        "accept": "*/*",
        "accept-language": "en-US,en;q=0.9",
        # NOTE(review): bare "Bearer" with no token — presumably the API
        # accepts anonymous device sessions; confirm.
        "authorization": "Bearer",
        "cache-control": "no-cache",
        "content-type": "application/json",
        "origin": AMIGOCHAT_URL,
        "pragma": "no-cache",
        "priority": "u=1, i",
        "referer": f"{AMIGOCHAT_URL}/",
        "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"',
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": '"Linux"',
        "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
        "x-device-language": "en-US",
        "x-device-platform": "web",
        "x-device-uuid": device_uuid,
        "x-device-version": "1.0.32"
    }

    data = {
        "messages": [{"role": m["role"], "content": m["content"]} for m in messages],
        "model": model,
        "personaId": get_persona_id(model),
        "frequency_penalty": 0,
        "max_tokens": 4000,
        "presence_penalty": 0,
        "stream": True,
        "temperature": 0.5,
        "top_p": 0.95
    }

    timeout = ClientTimeout(total=300)  # 5 minutes timeout

    async def event_stream():
        try:
            # BUGFIX: the session must live inside the generator.  The
            # original opened it with `async with` in the enclosing
            # coroutine and then returned the generator, so the session
            # was already closed by the time the caller consumed the
            # stream.
            async with ClientSession(headers=headers) as session:
                async with session.post(CHAT_API_ENDPOINT, json=data, timeout=timeout) as resp:
                    if resp.status not in (200, 201):
                        error_text = await resp.text()
                        raise HTTPException(status_code=resp.status, detail=error_text)

                    async for raw_line in resp.content:
                        line = raw_line.decode('utf-8').strip()
                        if not line.startswith('data: '):
                            continue
                        if line == 'data: [DONE]':
                            break
                        try:
                            chunk = json.loads(line[6:])  # Remove 'data: ' prefix
                        except json.JSONDecodeError:
                            # Ignore partial / malformed SSE payloads.
                            continue
                        choices = chunk.get('choices') or []
                        if not choices:
                            continue
                        content = choices[0].get('delta', {}).get('content')
                        if content:
                            # Re-emit the chunk in OpenAI streaming format.
                            response_data = {
                                "id": f"chatcmpl-{uuid.uuid4()}",
                                "object": "chat.completion.chunk",
                                "created": int(datetime.now().timestamp()),
                                "model": model,
                                "choices": [
                                    {
                                        "delta": {"content": content},
                                        "index": 0,
                                        "finish_reason": None,
                                    }
                                ],
                            }
                            yield f"data: {json.dumps(response_data)}\n\n"
            # Signal the end of the stream
            yield "data: [DONE]\n\n"
        except Exception as e:
            logger.error(f"Error in streaming response: {e}")
            raise HTTPException(status_code=500, detail=str(e))

    return event_stream()
170
+
171
async def process_non_streaming_response(request_data):
    """Handle a non-streaming AmigoChat request.

    Dispatches to the image-generation endpoint for image models and to
    the chat-completion endpoint otherwise, returning an OpenAI-style
    response dict.

    Args:
        request_data: dict with at least 'model' and 'messages' keys.

    Raises:
        HTTPException(400): no messages supplied.
        HTTPException(upstream status): the API answered with an HTTP error.
        HTTPException(500): image generation yielded no URLs, or any
            other failure.
    """
    model = get_amigochat_model(request_data.get('model'))
    messages = request_data.get('messages')
    if not messages:
        raise HTTPException(status_code=400, detail="Messages are required")

    device_uuid = str(uuid.uuid4())

    headers = {
        "accept": "*/*",
        "accept-language": "en-US,en;q=0.9",
        # NOTE(review): bare "Bearer" with no token — presumably the API
        # accepts anonymous device sessions; confirm.
        "authorization": "Bearer",
        "cache-control": "no-cache",
        "content-type": "application/json",
        "origin": AMIGOCHAT_URL,
        "pragma": "no-cache",
        "priority": "u=1, i",
        "referer": f"{AMIGOCHAT_URL}/",
        "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"',
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": '"Linux"',
        "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
        "x-device-language": "en-US",
        "x-device-platform": "web",
        "x-device-uuid": device_uuid,
        "x-device-version": "1.0.32"
    }

    async with ClientSession(headers=headers) as session:
        if is_image_model(model):
            # Image generation: only the most recent message is the prompt.
            prompt = messages[-1]['content']
            data = {
                "prompt": prompt,
                "model": model,
                "personaId": get_persona_id(model)
            }
            try:
                async with session.post(IMAGE_API_ENDPOINT, json=data) as response:
                    response.raise_for_status()
                    response_data = await response.json()
                    image_urls = [
                        item["url"]
                        for item in response_data.get("data", [])
                        if "url" in item
                    ]
                    if image_urls:
                        return {
                            "id": f"imggen-{uuid.uuid4()}",
                            "object": "image_generation",
                            "created": int(datetime.now().timestamp()),
                            "model": model,
                            "data": image_urls,
                        }
                    raise HTTPException(status_code=500, detail="Image generation failed")
            except HTTPException:
                # Don't re-wrap our own errors into a generic 500.
                raise
            except ClientResponseError as e:
                # Propagate the upstream HTTP status instead of collapsing
                # every failure to 500.
                logger.error(f"Error in image generation: {e}")
                raise HTTPException(status_code=e.status, detail=str(e))
            except Exception as e:
                logger.error(f"Error in image generation: {e}")
                raise HTTPException(status_code=500, detail=str(e))
        else:
            # Chat completion
            data = {
                "messages": [{"role": m["role"], "content": m["content"]} for m in messages],
                "model": model,
                "personaId": get_persona_id(model),
                "frequency_penalty": 0,
                "max_tokens": 4000,
                "presence_penalty": 0,
                "stream": False,
                "temperature": 0.5,
                "top_p": 0.95
            }

            try:
                async with session.post(CHAT_API_ENDPOINT, json=data) as response:
                    response.raise_for_status()
                    response_data = await response.json()
                    output = response_data.get('choices', [{}])[0].get('message', {}).get('content', '')
                    return {
                        "id": f"chatcmpl-{uuid.uuid4()}",
                        "object": "chat.completion",
                        "created": int(datetime.now().timestamp()),
                        "model": model,
                        "choices": [
                            {
                                "index": 0,
                                "message": {"role": "assistant", "content": output},
                                "finish_reason": "stop",
                            }
                        ],
                        "usage": None,
                    }
            except ClientResponseError as e:
                # Propagate the upstream HTTP status instead of collapsing
                # every failure to 500.
                logger.error(f"Error in chat completion: {e}")
                raise HTTPException(status_code=e.status, detail=str(e))
            except Exception as e:
                logger.error(f"Error in chat completion: {e}")
                raise HTTPException(status_code=500, detail=str(e))
api/provider/gizai.py DELETED
@@ -1,186 +0,0 @@
1
- import uuid
2
- from datetime import datetime
3
- import json
4
- from typing import Any, Dict
5
-
6
- import httpx
7
- from fastapi import HTTPException
8
- from api.logger import setup_logger
9
- from api.config import MODEL_MAPPING
10
-
11
- logger = setup_logger(__name__)
12
-
13
# --- GizAI endpoint --------------------------------------------------------
GIZAI_BASE_URL = "https://app.giz.ai"
GIZAI_API_ENDPOINT = f"{GIZAI_BASE_URL}/api/data/users/inferenceServer.infer"

# Browser-like request headers sent with every GizAI call.
GIZAI_HEADERS = {
    'Accept': 'application/json, text/plain, */*',
    'Accept-Language': 'en-US,en;q=0.9',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Content-Type': 'application/json',
    'Origin': 'https://app.giz.ai',
    'Pragma': 'no-cache',
    'Sec-Fetch-Dest': 'empty',
    'Sec-Fetch-Mode': 'cors',
    'Sec-Fetch-Site': 'same-origin',
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
    'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"Linux"'
}

# Chat models served by GizAI.
GIZAI_CHAT_MODELS = [
    'chat-gemini-flash',
    'chat-gemini-pro',
    'chat-gpt4m',
    'chat-gpt4',
    'claude-sonnet',
    'claude-haiku',
    'llama-3-70b',
    'llama-3-8b',
    'mistral-large',
    'chat-o1-mini'
]

# Image models served by GizAI.
GIZAI_IMAGE_MODELS = [
    'flux1',
    'sdxl',
    'sd',
    'sd35',
]

# Everything this provider can serve.
GIZAI_MODELS = GIZAI_CHAT_MODELS + GIZAI_IMAGE_MODELS

# Public model names mapped onto GizAI's internal ids.
GIZAI_MODEL_ALIASES = {
    # Chat model aliases
    "gemini-flash": "chat-gemini-flash",
    "gemini-pro": "chat-gemini-pro",
    "gpt-4o-mini": "chat-gpt4m",
    "gpt-4o": "chat-gpt4",
    "claude-3.5-sonnet": "claude-sonnet",
    "claude-3-haiku": "claude-haiku",
    "llama-3.1-70b": "llama-3-70b",
    "llama-3.1-8b": "llama-3-8b",
    "o1-mini": "chat-o1-mini",
    # Image model aliases
    "sd-1.5": "sd",
    "sd-3.5": "sd35",
    "flux-schnell": "flux1",
}
74
-
75
def get_gizai_model(model: str) -> str:
    """Resolve an incoming model name to a GizAI model id.

    Applies MODEL_MAPPING first, then alias resolution; unknown names
    fall back to 'chat-gemini-flash'.
    """
    resolved = MODEL_MAPPING.get(model, model)
    if resolved in GIZAI_MODELS:
        return resolved
    # Either an alias or the default model.
    return GIZAI_MODEL_ALIASES.get(resolved, 'chat-gemini-flash')
84
-
85
def is_image_model(model: str) -> bool:
    """True when *model* is one of GizAI's image-generation models."""
    return any(model == candidate for candidate in GIZAI_IMAGE_MODELS)
87
-
88
async def process_streaming_response(request_data):
    """Serve a "streaming" request against GizAI.

    GizAI has no streaming endpoint, so the request is handled
    non-streaming and the single JSON payload is wrapped in an iterator
    so callers can consume it like a stream.
    """
    result = await process_non_streaming_response(request_data)
    return iter([json.dumps(result)])
93
-
94
async def process_non_streaming_response(request_data):
    """Handle a non-streaming GizAI request.

    Dispatches to image generation for image models and to chat
    completion otherwise, returning an OpenAI-style response dict.

    Raises:
        HTTPException(upstream status): GizAI answered with an HTTP error.
        HTTPException(500): request/transport failure or failed generation.
    """
    model = get_gizai_model(request_data.get('model'))

    async with httpx.AsyncClient() as client:
        if is_image_model(model):
            # Image generation
            # Only the most recent message is used as the prompt.
            prompt = request_data['messages'][-1]['content']
            data = {
                "model": model,
                "input": {
                    "width": "1024",
                    "height": "1024",
                    "steps": 4,
                    "output_format": "webp",
                    "batch_size": 1,
                    "mode": "plan",
                    "prompt": prompt
                }
            }
            try:
                response = await client.post(
                    GIZAI_API_ENDPOINT,
                    headers=GIZAI_HEADERS,
                    json=data,
                    timeout=100,
                )
                response.raise_for_status()
                response_data = response.json()
                if response_data.get('status') == 'completed' and response_data.get('output'):
                    images = response_data['output']
                    # Return image response (e.g., URLs)
                    # NOTE(review): 'output' is passed through as-is —
                    # presumably a list of URLs; confirm against the API.
                    return {
                        "id": f"imggen-{uuid.uuid4()}",
                        "object": "image_generation",
                        "created": int(datetime.now().timestamp()),
                        # Echo the caller's original model name, not the
                        # resolved GizAI id.
                        "model": request_data['model'],
                        "data": images,
                    }
                else:
                    raise HTTPException(status_code=500, detail="Image generation failed")
            except httpx.HTTPStatusError as e:
                # Propagate the upstream HTTP status.
                logger.error(f"HTTP error occurred: {e}")
                raise HTTPException(status_code=e.response.status_code, detail=str(e))
            except httpx.RequestError as e:
                logger.error(f"Error occurred during request: {e}")
                raise HTTPException(status_code=500, detail=str(e))
        else:
            # Chat completion
            messages = request_data['messages']
            # GizAI takes a single "human" message, so the whole
            # conversation is flattened into one role-prefixed string.
            messages_content = "\n".join([f"{msg['role']}: {msg['content']}" for msg in messages])
            data = {
                "model": model,
                "input": {
                    "messages": [
                        {
                            "type": "human",
                            "content": messages_content
                        }
                    ],
                    "mode": "plan"
                },
                "noStream": True
            }
            try:
                response = await client.post(
                    GIZAI_API_ENDPOINT,
                    headers=GIZAI_HEADERS,
                    json=data,
                    timeout=100,
                )
                response.raise_for_status()
                response_data = response.json()
                output = response_data.get('output', '')
                return {
                    "id": f"chatcmpl-{uuid.uuid4()}",
                    "object": "chat.completion",
                    "created": int(datetime.now().timestamp()),
                    # Echo the caller's original model name.
                    "model": request_data['model'],
                    "choices": [
                        {
                            "index": 0,
                            "message": {"role": "assistant", "content": output},
                            "finish_reason": "stop",
                        }
                    ],
                    "usage": None,
                }
            except httpx.HTTPStatusError as e:
                # Propagate the upstream HTTP status.
                logger.error(f"HTTP error occurred: {e}")
                raise HTTPException(status_code=e.response.status_code, detail=str(e))
            except httpx.RequestError as e:
                logger.error(f"Error occurred during request: {e}")
                raise HTTPException(status_code=500, detail=str(e))