Update api/provider/gizai.py

api/provider/gizai.py  (+34 −10)
@@ -5,12 +5,33 @@ from typing import Any, Dict
 
 import httpx
 from fastapi import HTTPException
-from api.models import ChatRequest
 from api.logger import setup_logger
-from api.config import MODEL_MAPPING
+from api.config import MODEL_MAPPING
 
 logger = setup_logger(__name__)
 
+# Base URL and API Endpoint for GizAI
+GIZAI_BASE_URL = "https://app.giz.ai"
+GIZAI_API_ENDPOINT = f"{GIZAI_BASE_URL}/api/data/users/inferenceServer.infer"
+
+# Headers for GizAI
+GIZAI_HEADERS = {
+    'Accept': 'application/json, text/plain, */*',
+    'Accept-Language': 'en-US,en;q=0.9',
+    'Cache-Control': 'no-cache',
+    'Connection': 'keep-alive',
+    'Content-Type': 'application/json',
+    'Origin': 'https://app.giz.ai',
+    'Pragma': 'no-cache',
+    'Sec-Fetch-Dest': 'empty',
+    'Sec-Fetch-Mode': 'cors',
+    'Sec-Fetch-Site': 'same-origin',
+    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
+    'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
+    'sec-ch-ua-mobile': '?0',
+    'sec-ch-ua-platform': '"Linux"'
+}
+
 # List of models supported by GizAI
 GIZAI_CHAT_MODELS = [
     'chat-gemini-flash',
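For context, the new constants are already enough to exercise the endpoint by hand. A minimal sketch, assuming the constants import from this module; the helper name gizai_post is hypothetical, and the payload body is left abstract because the hunks below truncate the "input" dict:

import httpx

from api.provider.gizai import GIZAI_API_ENDPOINT, GIZAI_HEADERS

async def gizai_post(data: dict) -> dict:
    # Hypothetical helper: attach the browser-like GIZAI_HEADERS to every
    # request; the payload shape ("model" plus "input") follows the `data`
    # dicts built later in this file.
    async with httpx.AsyncClient(headers=GIZAI_HEADERS) as client:
        response = await client.post(GIZAI_API_ENDPOINT, json=data)
        response.raise_for_status()
        return response.json()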
@@ -64,17 +85,19 @@ def get_gizai_model(model: str) -> str:
 def is_image_model(model: str) -> bool:
     return model in GIZAI_IMAGE_MODELS
 
-async def process_streaming_response(request: ChatRequest):
+async def process_streaming_response(request_data):
     # GizAI does not support streaming; handle as non-streaming
-    return await process_non_streaming_response(request)
+    response = await process_non_streaming_response(request_data)
+    # Return the response wrapped in an iterator
+    return iter([json.dumps(response)])
 
-async def process_non_streaming_response(request: ChatRequest):
-    model = get_gizai_model(request.model)
+async def process_non_streaming_response(request_data):
+    model = get_gizai_model(request_data.get('model'))
 
     async with httpx.AsyncClient() as client:
         if is_image_model(model):
             # Image generation
-            prompt = request.messages[-1].content
+            prompt = request_data['messages'][-1]['content']
             data = {
                 "model": model,
                 "input": {
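Because GizAI has no streaming mode, process_streaming_response now fakes a stream by emitting the whole completion as a single JSON chunk. A sketch of how a caller might consume that, assuming a FastAPI route (with a hypothetical path) that is not part of this commit:

from fastapi import FastAPI
from fastapi.responses import StreamingResponse

from api.provider.gizai import (
    process_non_streaming_response,
    process_streaming_response,
)

app = FastAPI()

@app.post("/v1/chat/completions")  # hypothetical route
async def chat_completions(request_data: dict):
    # StreamingResponse accepts any iterable, including the one-element
    # iterator returned by process_streaming_response.
    if request_data.get("stream"):
        iterator = await process_streaming_response(request_data)
        return StreamingResponse(iterator, media_type="application/json")
    return await process_non_streaming_response(request_data)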
@@ -103,7 +126,7 @@ async def process_non_streaming_response(request: ChatRequest):
                     "id": f"imggen-{uuid.uuid4()}",
                     "object": "image_generation",
                     "created": int(datetime.now().timestamp()),
-                    "model": request.model,
+                    "model": request_data['model'],
                     "data": images,
                 }
             else:
@@ -116,7 +139,8 @@ async def process_non_streaming_response(request: ChatRequest):
                 raise HTTPException(status_code=500, detail=str(e))
         else:
             # Chat completion
-            messages_content = "\n".join([f"{msg.role}: {msg.content}" for msg in request.messages])
+            messages = request_data['messages']
+            messages_content = "\n".join([f"{msg['role']}: {msg['content']}" for msg in messages])
             data = {
                 "model": model,
                 "input": {
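For reference, the flattening above collapses the OpenAI-style message list into a single prompt string, since GizAI takes one input rather than a role-tagged conversation. For example:

messages = [
    {"role": "system", "content": "You are terse."},
    {"role": "user", "content": "Hi"},
]
"\n".join([f"{msg['role']}: {msg['content']}" for msg in messages])
# -> 'system: You are terse.\nuser: Hi'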
@@ -144,7 +168,7 @@ async def process_non_streaming_response(request: ChatRequest):
                     "id": f"chatcmpl-{uuid.uuid4()}",
                     "object": "chat.completion",
                     "created": int(datetime.now().timestamp()),
-                    "model": request.model,
+                    "model": request_data['model'],
                     "choices": [
                         {
                             "index": 0,
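Taken together, the change drops the typed ChatRequest parameter in favor of a plain dict, so both entry points can be driven directly with an OpenAI-style payload. A minimal usage sketch, assuming the module imports as api.provider.gizai and that the GizAI endpoint is reachable:

import asyncio

from api.provider.gizai import process_non_streaming_response

request_data = {
    "model": "chat-gemini-flash",
    "messages": [{"role": "user", "content": "Hello"}],
}

# Returns an OpenAI-style envelope: id, object, created, model, choices.
response = asyncio.run(process_non_streaming_response(request_data))
print(response["choices"][0])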