Update main.py
main.py
CHANGED
@@ -3,31 +3,31 @@ import time
 import random
 import asyncio
 import json
-
 from fastapi import FastAPI, HTTPException, Depends
 from fastapi.middleware.cors import CORSMiddleware
 from fastapi.security.api_key import APIKeyHeader
-from pydantic import BaseModel
+from pydantic import BaseModel
 from typing import List, Optional
 from dotenv import load_dotenv
 from starlette.responses import StreamingResponse
 from openai import OpenAI
-from typing import List, Optional,
+from typing import List, Optional, Dict, Any
+
 load_dotenv()
 
+BASE_URL = "https://generativelanguage.googleapis.com/v1beta/openai/"
+EXPECTED_API_KEY = os.getenv("API_HUGGINGFACE")
+API_KEY_NAME = "Authorization"
 API_KEYS = [
     os.getenv("API_GEMINI_1"),
     os.getenv("API_GEMINI_2"),
     os.getenv("API_GEMINI_3")
 ]
 
-
-EXPECTED_API_KEY = os.getenv("API_HUGGINGFACE")
-API_KEY_NAME = "Authorization"
-
+# Pydantic request-body validation classes
 class Message(BaseModel):
-    role:
-    content:
+    role: Any
+    content: Any
 
 class ChatCompletionRequest(BaseModel):
     model: str = "gemini-2.0-flash"
@@ -35,8 +35,11 @@ class ChatCompletionRequest(BaseModel):
     max_tokens: Optional[int] = 8196
     temperature: Optional[float] = 0.8
     stream: Optional[bool] = False
+    stream_options: Optional[Dict[str, Any]] = None
+    class Config:
+        extra = "allow"
 
-
+# FastAPI server
 app = FastAPI(title="OpenAI-SDK-compatible API", version="1.0.0", description="Un wrapper FastAPI compatibile con le specifiche dell'API OpenAI.")
 app.add_middleware(
     CORSMiddleware,
@@ -46,29 +49,32 @@ app.add_middleware(
     allow_headers=["*"],
 )
 
-
+# API key validation
+api_key_header = APIKeyHeader(name=API_KEY_NAME, auto_error=False)
 def verify_api_key(api_key: str = Depends(api_key_header)):
+    ''' API key validation - for now from ENV, the HF token '''
     if not api_key:
         raise HTTPException(status_code=403, detail="API key mancante")
     if api_key != f"Bearer {EXPECTED_API_KEY}":
         raise HTTPException(status_code=403, detail="API key non valida")
     return api_key
 
+# OpenAI client
 def get_openai_client():
+    ''' OpenAI client picking one of the API keys at RANDOM. This way I can work around "Quota Exceeded" limits '''
     api_key = random.choice(API_KEYS)
     return OpenAI(api_key=api_key, base_url=BASE_URL)
 
+# API call (without streaming)
 def call_api_sync(params: ChatCompletionRequest):
+    ''' API call without streaming. Retries on a 429 error '''
     try:
         client = get_openai_client()
-
-        response = client.chat.completions.create(
-            model=params.model,
-            messages=[m.model_dump() for m in params.messages],
-            max_tokens=params.max_tokens,
-            temperature=params.temperature,
-            stream=params.stream
-        )
+        response_format = getattr(params, 'response_format', None)
+        if response_format and getattr(response_format, 'type', None) == 'json_schema':
+            response = client.beta.chat.completions.parse(**params.model_dump())
+        else:
+            response = client.chat.completions.create(**params.model_dump())
         return response
     except Exception as e:
         if "429" in str(e):
@@ -77,26 +83,27 @@ def call_api_sync(params: ChatCompletionRequest):
     else:
         raise e
 
+# API call (with streaming)
 async def _resp_async_generator(params: ChatCompletionRequest):
+    ''' API call with streaming. Retries on a 429 error '''
     client = get_openai_client()
     try:
-        response = client.chat.completions.create(
-            model=params.model,
-            messages=[m.model_dump() for m in params.messages],
-            max_tokens=params.max_tokens,
-            temperature=params.temperature,
-            stream=True
-        )
+        response = client.chat.completions.create(**params.model_dump())
         for chunk in response:
             chunk_data = chunk.to_dict() if hasattr(chunk, "to_dict") else chunk
             yield f"data: {json.dumps(chunk_data)}\n\n"
             await asyncio.sleep(0.01)
         yield "data: [DONE]\n\n"
     except Exception as e:
-        error_data = {"error": str(e)}
-        yield f"data: {json.dumps(error_data)}\n\n"
+        if "429" in str(e):
+            await asyncio.sleep(2)
+            async for item in _resp_async_generator(params):
+                yield item
+        else:
+            error_data = {"error": str(e)}
+            yield f"data: {json.dumps(error_data)}\n\n"
 
-#
+# ---------------------------------- API methods ---------------------------------------
 @app.get("/")
 def read_general():
     return {"response": "Benvenuto"}
@@ -105,20 +112,15 @@ def read_general():
 async def health_check():
     return {"message": "success"}
 
-# ---------------------------------- Text generation ---------------------------------------
-
 @app.post("/v1/chat/completions", dependencies=[Depends(verify_api_key)])
 async def chat_completions(req: ChatCompletionRequest):
-
-
-
-
-
-
-
-
-
-
-        return response
-    except Exception as e:
-        raise HTTPException(status_code=500, detail=str(e))
+    print(req)
+    try:
+        if not req.messages:
+            raise HTTPException(status_code=400, detail="Nessun messaggio fornito")
+        if not req.stream:
+            return call_api_sync(req)
+        else:
+            return StreamingResponse(_resp_async_generator(req), media_type="application/x-ndjson")
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=str(e))
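The new stream_options field together with the Config setting extra = "allow" is what lets the wrapper forward arbitrary OpenAI-style parameters upstream through **params.model_dump(). A minimal self-contained sketch of that behavior; the messages: List[Message] field does not appear in the diff and is assumed from how params.messages is used:

# Sketch only: the request model reduced to the fields visible in the diff.
from typing import Any, Dict, List, Optional
from pydantic import BaseModel

class Message(BaseModel):
    role: Any
    content: Any

class ChatCompletionRequest(BaseModel):
    model: str = "gemini-2.0-flash"
    messages: List[Message]  # assumed; not shown in the diff
    max_tokens: Optional[int] = 8196
    temperature: Optional[float] = 0.8
    stream: Optional[bool] = False
    stream_options: Optional[Dict[str, Any]] = None

    class Config:
        extra = "allow"  # undeclared fields are kept instead of rejected

req = ChatCompletionRequest(
    messages=[{"role": "user", "content": "Ciao"}],
    response_format={"type": "json_schema"},  # not declared above, kept anyway
)
print(req.model_dump()["response_format"])  # {'type': 'json_schema'}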
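Because the wrapper mirrors the OpenAI chat-completions contract, the official SDK can point straight at it. A sketch under assumptions: the Space URL is a placeholder, and api_key must be the same token the server reads from API_HUGGINGFACE, since the SDK sends it as "Authorization: Bearer <token>", exactly the string verify_api_key compares against:

from openai import OpenAI

client = OpenAI(
    base_url="https://your-space.hf.space/v1",  # placeholder deployment URL
    api_key="hf_xxx",                           # the API_HUGGINGFACE token
)

resp = client.chat.completions.create(
    model="gemini-2.0-flash",
    messages=[{"role": "user", "content": "Ciao!"}],
)
print(resp.choices[0].message.content)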
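With stream set to true, _resp_async_generator emits "data: <json>" records and a closing "data: [DONE]". A hedged sketch of consuming that stream with requests; URL and token are again placeholders:

import json
import requests

with requests.post(
    "https://your-space.hf.space/v1/chat/completions",  # placeholder URL
    headers={"Authorization": "Bearer hf_xxx"},
    json={
        "model": "gemini-2.0-flash",
        "messages": [{"role": "user", "content": "Ciao!"}],
        "stream": True,
    },
    stream=True,
) as r:
    for line in r.iter_lines(decode_unicode=True):
        if not line or not line.startswith("data: "):
            continue
        payload = line[len("data: "):]
        if payload == "[DONE]":
            break
        chunk = json.loads(payload)  # one ChatCompletionChunk-shaped dict
        delta = chunk.get("choices", [{}])[0].get("delta", {})
        print(delta.get("content") or "", end="", flush=True)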