Update main.py
main.py CHANGED
@@ -1,27 +1,31 @@
-from fastapi import FastAPI, Request
-from fastapi.middleware.cors import CORSMiddleware  # import the CORS middleware
-from pydantic import BaseModel
-from huggingface_hub import InferenceClient
-from datetime import datetime
-from gradio_client import Client
-import base64
-import requests
 import os
-import socket
 import time
-from enum import Enum
 import random
-import aiohttp
 import asyncio
 import json
-from types import SimpleNamespace
-from io import BytesIO
-from PIL import Image
-
-#--------------------------------------------------- FastAPI server definition ------------------------------------------------------
-app = FastAPI()
-client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
 
+from fastapi import FastAPI, HTTPException, Depends
+from fastapi.middleware.cors import CORSMiddleware
+from fastapi.security.api_key import APIKeyHeader
+from pydantic import BaseModel, Field, field_validator
+from typing import List, Optional
+from dotenv import load_dotenv
+from starlette.responses import StreamingResponse
+from openai import OpenAI
+from typing import List, Optional, Type
+load_dotenv()
+
+API_KEYS = [
+    os.getenv("API_GEMINI_1"),
+    os.getenv("API_GEMINI_2"),
+    os.getenv("API_GEMINI_3")
+]
+
+BASE_URL = os.getenv("BASE_URL", "https://generativelanguage.googleapis.com/v1beta/openai/")
+EXPECTED_API_KEY = os.getenv("EXPECTED_API_KEY", "1234")
+API_KEY_NAME = "Authorization"
+api_key_header = APIKeyHeader(name=API_KEY_NAME, auto_error=False)
+app = FastAPI(title="OpenAI-SDK-compatible API", version="1.0.0", description="Un wrapper FastAPI compatibile con le specifiche dell'API OpenAI.")
 app.add_middleware(
     CORSMiddleware,
     allow_origins=["*"],
@@ -30,521 +34,90 @@ app.add_middleware(
 allow_headers=["*"],
 )
 
-class InputData(BaseModel):
-    input: str
-    systemRole: str = ''
-    systemStyle: str = ''
-    instruction: str = ''
-    temperature: float = 0.7
-    max_new_tokens: int = 2000
-    top_p: float = 0.95
-    repetition_penalty: float = 1.0
-    asincrono: bool = False
-    NumeroGenerazioni: int = 1
-    StringaSplit: str = '********'
-    NumeroCaratteriSplitInstruction: int = 30000
-    EliminaRisposteNonPertinenti: bool = False
-    UnificaRispostaPertinente: bool = False
-    telegramChatId: str = ''
-    telegramUrlBot: str = ''
-    telegramUrlPost: str = ''
-
-class InputDataAsync(InputData):
-    test: str = ''
-
-class PostSpazio(BaseModel):
-    nomeSpazio: str
-    input: str = ''
-    api_name: str = "/chat"
-
-def LoggaTesto(log_type, data, serializza=True):
-    if serializza:
-        formatted_data = json.dumps(data, indent=2)
-    else:
-        formatted_data = data
-    print(f"\n{datetime.now()}: ---------------------------------------------------------------| {log_type} |--------------------------------------------------------------\n{formatted_data}")
-
-#--------------------------------------------------- Text generation ------------------------------------------------------
-@app.post("/Genera")
-def generate_text(request: Request, input_data: InputData):
-    if not input_data.asincrono:
-        LoggaTesto("INPUT", input_data.input, False)
-        temperature = input_data.temperature
-        max_new_tokens = input_data.max_new_tokens
-        top_p = input_data.top_p
-        repetition_penalty = input_data.repetition_penalty
-        input_text = generate_input_text(input_data)
-        history = []
-        generated_response = generate(input_text, history, temperature, max_new_tokens, top_p, repetition_penalty)
-        if input_data.telegramChatId != '' and input_data.telegramUrlBot != '' and input_data.telegramUrlPost != '':
-            asyncio.run(call_telegram_api(input_data, generated_response))
-        LoggaTesto("RISPOSTA", {"response": generated_response}, False)
-        return {"response": generated_response}
-        #return json.dumps({"response": generated_response})
-    else:
-        input_data.asincrono = False
-        if input_data.EliminaRisposteNonPertinenti:
-            msgEliminaRisposteNonPertinenti = " (Rispondi solo sulla base delle ISTRUZIONI che hai ricevuto. se non trovi corrispondenza tra RICHIESTA e ISTRUZIONI rispondi con <NOTFOUND>!!!)"
-            input_data.input = input_data.input + msgEliminaRisposteNonPertinenti
-            input_data.systemRole = input_data.systemRole + msgEliminaRisposteNonPertinenti
-        result_data = asyncio.run(GeneraTestoAsync("https://matteoscript-fastapi.hf.space/Genera", input_data))
-        #result_data = result_data.replace('"', '')
-        LoggaTesto("RISPOSTA ASINCRONA", {"response": result_data})
-        if input_data.EliminaRisposteNonPertinenti:
-            result_data = [item for item in result_data if "NOTFOUND" not in item["response"]]
-        if input_data.UnificaRispostaPertinente:
-            input_data.input = f'''Metti insieme le seguenti risposte. Basati solo su questo TESTO e non AGGIUNGERE ALTRO!!!!: {result_data}'''
-            input_data.systemRole = ''
-            input_data.systemStyle = 'Rispondi in ITALIANO'
-            input_data.instruction = ''
-            result_data = asyncio.run(GeneraTestoAsync("https://matteoscript-fastapi.hf.space/Genera", input_data))
-            #result_data = result_data.replace('"', '')
-            LoggaTesto("RISPOSTA ASINCRONA UNIFICATA", {"response": result_data})
-        return {"response": result_data}
-        #return json.dumps({"response": result_data})
-
-def call_telegram_api_OLD(input_data, text):
-    payload = {
-        "chat_id": input_data.telegramChatId,
-        "text": text,
-        "telegramUrl": input_data.telegramUrlBot
-    }
-    response = requests.post(input_data.telegramUrlPost, json=payload)
-    if response.status_code == 200:
-        print("Invio messaggio TELEGRAM")
-    else:
-        print("Errore nella richiesta POST. Codice di stato:", response.status_code)
-
-async def call_telegram_api(input_data, text):
-    payload = {
-        "chat_id": input_data.telegramChatId,
-        "text": text,
-        "telegramUrl": input_data.telegramUrlBot
-    }
-    async with aiohttp.ClientSession() as session:
-        async with session.post(input_data.telegramUrlPost, json=payload) as response:
-            response_text = await response.text()
-
-def generate_input_text(input_data):
-    if input_data.systemRole != '':
-        input_text = f'''
-        {{
-            "input": {{
-                "role": "system",
-                "content": "{input_data.systemRole}",
-                "style": "{input_data.systemStyle}"
-            }},
-            "messages": [
-                {{
-                    "role": "instructions",
-                    "content": "{input_data.instruction} "("{input_data.systemStyle}")"
-                }},
-                {{
-                    "role": "user",
-                    "content": "{input_data.input}"
-                }}
-            ]
-        }}
-        '''
-    else:
-        input_text = input_data.input
-    return input_text
-
-def generate(prompt, history, temperature=0.7, max_new_tokens=30000, top_p=0.95, repetition_penalty=1.0):
-    temperature = float(temperature)
-    if temperature < 1e-2:
-        temperature = 1e-2
-    top_p = float(top_p)
-    generate_kwargs = dict(
-        temperature=temperature,
-        max_new_tokens=max_new_tokens,
-        top_p=top_p,
-        repetition_penalty=repetition_penalty,
-        do_sample=True,
-        seed=random.randint(0, 10**7),
-    )
-    formatted_prompt = format_prompt(prompt, history)
-    output = client.text_generation(formatted_prompt, **generate_kwargs, stream=False, details=False)
-    return output
-
-def format_prompt(message, history):
-    prompt = "<s>"
-    for user_prompt, bot_response in history:
-        prompt += f"[INST] {user_prompt} [/INST]"
-        prompt += f" {bot_response}</s> "
-    now = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
-    prompt += f"[{now}] [INST] {message} [/INST]"
-    return prompt
-
-#--------------------------------------------------- Async text generation ------------------------------------------------------
-@app.post("/GeneraAsync")
-def generate_textAsync(request: Request, input_data: InputDataAsync):
-    result_data = asyncio.run(GeneraTestoAsync("https://matteoscript-fastapi.hf.space/Genera", input_data))
-    return {"response": result_data}
-
-async def make_request(session, token, data, url, index, semaphore, max_retries=3):
-    async with semaphore:
-        headers = {
-            'Content-Type': 'application/json',
-            'Authorization': 'Bearer ' + token
-        }
-        if (int(index)+1) % 3 == 1:
-            data['max_new_tokens'] = data['max_new_tokens']
-        elif (int(index)+1) % 3 == 2:
-            data['max_new_tokens'] = max(200, data['max_new_tokens'] - 200)
-        else:
-            data['max_new_tokens'] = data['max_new_tokens'] + 200
-        for _ in range(max_retries):
-            try:
-                async with session.post(url, headers=headers, json=data) as response:
-                    response.raise_for_status()
-                    try:
-                        result_data = await response.json()
-                    except aiohttp.ContentTypeError:
-                        result_data = await response.text()
-                    return result_data
-            except (asyncio.TimeoutError, aiohttp.ClientError, requests.exceptions.HTTPError) as e:
-                LoggaTesto("ERRORE ASYNC", {e}, False)
-                if isinstance(e, (asyncio.TimeoutError, requests.exceptions.HTTPError)) and e.response.status in [502, 504]:
-                    break
-                await asyncio.sleep(3)
-        raise Exception("Max retries reached or skipping retries. Unable to make the request.")
-
-async def CreaListaInput(input_data):
-    if input_data.instruction.startswith("http"):
-        try:
-            resp = requests.get(input_data.instruction)
-            resp.raise_for_status()
-            input_data.instruction = resp.text
-        except requests.exceptions.RequestException as e:
-            input_data.instruction = ""
-    try:
-        lista_dizionari = json.loads(input_data.instruction)
-        nuova_lista_dizionari = DividiInstructionJSON(lista_dizionari, input_data)
-    except json.JSONDecodeError:
-        nuova_lista_dizionari = DividiInstructionText(input_data)
-    return nuova_lista_dizionari
-
-def split_at_space_or_dot(input_string, length):
-    delimiters = ['\n\n', '.\n', ';\n', '.', ' ']
-    positions = [input_string.rfind(d, 0, length) for d in delimiters]
-    valid_positions = [pos for pos in positions if pos >= 0]
-    lastpos = max(valid_positions) if valid_positions else length
-    indice_divisione = int(lastpos)
-    return indice_divisione + 1
-
-def DividiInstructionJSON(lista_dizionari, input_data):
-    ListaInput = []
-    nuova_lista_dizionari = []
-    for dizionario in lista_dizionari:
-        titolo = dizionario["Titolo"]
-        testo_completo = dizionario["Testo"]
-        while len(testo_completo) > input_data.NumeroCaratteriSplitInstruction:
-            indice_divisione = split_at_space_or_dot(testo_completo, input_data.NumeroCaratteriSplitInstruction)
-            indice_divisione_precedente = split_at_space_or_dot(testo_completo, input_data.NumeroCaratteriSplitInstruction-100)
-            sottostringa = testo_completo[:indice_divisione].strip()
-            testo_completo = testo_completo[indice_divisione_precedente:].strip()
-            nuovo_dizionario = {"Titolo": titolo, "Testo": sottostringa}
-            nuova_lista_dizionari.append(nuovo_dizionario)
-        nuovo_dizionario = {"Titolo": titolo, "Testo": testo_completo}
-        nuova_lista_dizionari.append(nuovo_dizionario)
-    input_strings = input_data.input.split(input_data.StringaSplit)
-    systemRole_strings = input_data.systemRole.split(input_data.StringaSplit)
-    for systemRole_string in systemRole_strings:
-        for input_string in input_strings:
-            for dizionario in nuova_lista_dizionari:
-                data = {
-                    'input': input_string,
-                    'instruction': str(dizionario),
-                    'temperature': input_data.temperature,
-                    'max_new_tokens': input_data.max_new_tokens,
-                    'top_p': input_data.top_p,
-                    'repetition_penalty': input_data.repetition_penalty,
-                    'systemRole': systemRole_string,
-                    'systemStyle': input_data.systemStyle,
-                    'telegramChatId': input_data.telegramChatId,
-                    'telegramUrlBot': input_data.telegramUrlBot,
-                    'telegramUrlPost': input_data.telegramUrlPost
-                }
-                ListaInput.append(data)
-    return ListaInput
-
-def DividiInstructionText(input_data):
-    ListaInput = []
-    input_str = input_data.instruction
-    StringaSplit = input_data.StringaSplit
-    sottostringhe = []
-    indice_inizio = 0
-    if len(input_str) > input_data.NumeroCaratteriSplitInstruction:
-        while indice_inizio < len(input_str):
-            lunghezza_sottostringa = split_at_space_or_dot(input_str[indice_inizio:], input_data.NumeroCaratteriSplitInstruction)
-            sottostringhe.append(input_str[indice_inizio:indice_inizio + lunghezza_sottostringa].strip())
-            indice_inizio += lunghezza_sottostringa
-    else:
-        sottostringhe.append(input_str)
-    testoSeparato = StringaSplit.join(sottostringhe)
-    instruction_strings = testoSeparato.split(StringaSplit)
-    input_strings = input_data.input.split(input_data.StringaSplit)
-    systemRole_strings = input_data.systemRole.split(input_data.StringaSplit)
-    for systemRole_string in systemRole_strings:
-        for input_string in input_strings:
-            for instruction_string in instruction_strings:
-                data = {
-                    'input': input_string.strip(),
-                    'instruction': str([instruction_string.strip()]),
-                    'temperature': input_data.temperature,
-                    'max_new_tokens': input_data.max_new_tokens,
-                    'top_p': input_data.top_p,
-                    'repetition_penalty': input_data.repetition_penalty,
-                    'systemRole': systemRole_string.strip(),
-                    'systemStyle': input_data.systemStyle,
-                    'telegramChatId': input_data.telegramChatId,
-                    'telegramUrlBot': input_data.telegramUrlBot,
-                    'telegramUrlPost': input_data.telegramUrlPost
-                }
-                ListaInput.append(data)
-    return ListaInput
-
-async def GeneraTestoAsync(url, input_data):
-    token = os.getenv('TOKEN')
-    semaphore = asyncio.Semaphore(20)
-    async with aiohttp.ClientSession() as session:
-        tasks = []
-        ListaInput = await CreaListaInput(input_data)
-        for data in ListaInput:
-            LoggaTesto("RICHIESTA ASINCRONA", data)
-            tasks.extend([make_request(session, token, data, url, index, semaphore) for index in range(input_data.NumeroGenerazioni)])
-            #tasks.extend([generate_text_internal(data) for _ in range(input_data.NumeroGenerazioni)])
-            await asyncio.sleep(0.1)
-        return await asyncio.gather(*tasks)
-
-async def generate_text_internal(datajson):
-    data = SimpleNamespace(**datajson)
-    temperature = data.temperature
-    max_new_tokens = data.max_new_tokens
-    top_p = data.top_p
-    repetition_penalty = data.repetition_penalty
-    input_text = generate_input_text(data)
-    max_new_tokens = min(max_new_tokens, 29500 - len(input_text))
-    history = []
-    generated_response = generate(input_text, history, temperature, max_new_tokens, top_p, repetition_penalty)
-    return generated_response
-
-#--------------------------------------------------- Image generation ------------------------------------------------------
-style_image = {
-    "PROFESSIONAL-PHOTO": {
-        "descrizione": "Professional photo {prompt} . Vivid colors, Mirrorless, 35mm lens, f/1.8 aperture, ISO 100, natural daylight",
-        "negativePrompt": "out of frame, lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"
-    },
-    "CINEMATIC-PHOTO": {
-        "descrizione": "cinematic photo {prompt} . 35mm photograph, film, bokeh, professional, 4k, highly detailed",
-        "negativePrompt": "drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly"
-    },
-    "CINEMATIC-PORTRAIT": {
-        "descrizione": "cinematic portrait {prompt} 8k, ultra realistic, good vibes, vibrant",
-        "negativePrompt": "drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly"
-    },
-    "LINE-ART-DRAWING": {
-        "descrizione": "line art drawing {prompt} . professional, sleek, modern, minimalist, graphic, line art, vector graphics",
-        "negativePrompt": "anime, photorealistic, 35mm film, deformed, glitch, blurry, noisy, off-center, deformed, cross-eyed, closed eyes, bad anatomy, ugly, disfigured, mutated, realism, realistic, impressionism, expressionism, oil, acrylic"
-    },
-    "COMIC": {
-        "descrizione": "comic {prompt} . graphic illustration, comic art, graphic novel art, vibrant, highly detailed",
-        "negativePrompt": "photograph, deformed, glitch, noisy, realistic, stock photo"
-    },
-    "ADVERTISING-POSTER-STYLE": {
-        "descrizione": "advertising poster style {prompt} . Professional, modern, product-focused, commercial, eye-catching, highly detailed",
-        "negativePrompt": "noisy, blurry, amateurish, sloppy, unattractive"
-    },
-    "RETAIL-PACKAGING-STYLE": {
-        "descrizione": "retail packaging style {prompt} . vibrant, enticing, commercial, product-focused, eye-catching, professional, highly detailed",
-        "negativePrompt": "noisy, blurry, amateurish, sloppy, unattractive"
-    },
-    "GRAFFITI-STYLE": {
-        "descrizione": "graffiti style {prompt} . street art, vibrant, urban, detailed, tag, mural",
-        "negativePrompt": "ugly, deformed, noisy, blurry, low contrast, realism, photorealistic"
-    },
-    "POP-ART-STYLE": {
-        "descrizione": "pop Art style {prompt} . bright colors, bold outlines, popular culture themes, ironic or kitsch",
-        "negativePrompt": "ugly, deformed, noisy, blurry, low contrast, realism, photorealistic, minimalist"
-    },
-    "ISOMETRIC-STYLE": {
-        "descrizione": "isometric style {prompt} . vibrant, beautiful, crisp, detailed, ultra detailed, intricate",
-        "negativePrompt": "deformed, mutated, ugly, disfigured, blur, blurry, noise, noisy, realistic, photographic"
-    },
-    "LOW-POLY-STYLE": {
-        "descrizione": "low-poly style {prompt}. ambient occlusion, low-poly game art, polygon mesh, jagged, blocky, wireframe edges, centered composition",
-        "negativePrompt": "noisy, sloppy, messy, grainy, highly detailed, ultra textured, photo"
-    },
-    "CLAYMATION-STYLE": {
-        "descrizione": "claymation style {prompt} . sculpture, clay art, centered composition, play-doh",
-        "negativePrompt": ""
-    },
-    "PROFESSIONAL-3D-MODEL": {
-        "descrizione": "professional 3d model {prompt} . octane render, highly detailed, volumetric, dramatic lighting",
-        "negativePrompt": "ugly, deformed, noisy, low poly, blurry, painting"
-    },
-    "ANIME-ARTWORK": {
-        "descrizione": "anime artwork {prompt} . anime style, key visual, vibrant, studio anime, highly detailed",
-        "negativePrompt": "photo, deformed, black and white, realism, disfigured, low contrast"
-    },
-    "ETHEREAL-FANTASY-CONCEPT-ART": {
-        "descrizione": "ethereal fantasy concept art of {prompt} . magnificent, celestial, ethereal, painterly, epic, majestic, magical, fantasy art, cover art, dreamy",
-        "negativePrompt": "photographic, realistic, realism, 35mm film, dslr, cropped, frame, text, deformed, glitch, noise, noisy, off-center, deformed, cross-eyed, closed eyes, bad anatomy, ugly, disfigured, sloppy, duplicate, mutated, black and white"
-    },
-    "CYBERNETIC-STYLE": {
-        "descrizione": "cybernetic style {prompt} . futuristic, technological, cybernetic enhancements, robotics, artificial intelligence themes",
-        "negativePrompt": "ugly, deformed, noisy, blurry, low contrast, realism, photorealistic, historical, medieval"
-    },
-    "FUTURISTIC-STYLE": {
-        "descrizione": "futuristic style {prompt} . sleek, modern, ultramodern, high tech, detailed",
-        "negativePrompt": "ugly, deformed, noisy, blurry, low contrast, realism, photorealistic, vintage, antique"
-    },
-    "SCI-FI-STYLE": {
-        "descrizione": "sci-fi style {prompt} . futuristic, technological, alien worlds, space themes, advanced civilizations",
-        "negativePrompt": "ugly, deformed, noisy, blurry, low contrast, realism, photorealistic, historical, medieval"
-    },
-    "DIGITAL-ART": {
-        "descrizione": "Digital Art {prompt} . vibrant, cute, digital, handmade",
-        "negativePrompt": ""
-    },
-    "SIMPLE-LOGO": {
-        "descrizione": "Minimalist Logo {prompt} . material design, primary colors, stylized, minimalist",
-        "negativePrompt": "3D, high detail, noise, grainy, blurry, painting, drawing, photo, disfigured"
-    },
-    "MINIMALISTIC-LOGO": {
-        "descrizione": "Ultra-minimalist Material Design logo for a BRAND: {prompt} . simple, few colors, clean lines, minimal details, modern color palette, no shadows",
-        "negativePrompt": "3D, high detail, noise, grainy, blurry, painting, drawing, photo, disfigured"
-    }
-}
-
-class InputImage(BaseModel):
-    input: str
-    negativePrompt: str = ''
-    style: str = ''
-    steps: int = 25
-    cfg: int = 6
-    seed: int = -1
-    variante = False
-
-@app.post("/Immagine")
-def generate_image(request: Request, input_data: InputImage):
-    #client = Client("https://manjushri-sdxl-1-0.hf.space/")
-    if input_data.style:
-        print(input_data.style)
-        if input_data.style == 'RANDOM':
-            random_style = random.choice(list(style_image.keys()))
-            style_info = style_image[random_style]
-            input_data.input = style_info["descrizione"].format(prompt=input_data.input)
-            input_data.negativePrompt = style_info["negativePrompt"]
-        elif input_data.style in style_image:
-            style_info = style_image[input_data.style]
-            input_data.input = style_info["descrizione"].format(prompt=input_data.input)
-            input_data.negativePrompt = style_info["negativePrompt"]
-    max_attempts = 5
-    attempt = 0
-    while attempt < max_attempts:
-        try:
-            print(input_data.input)
-            if input_data.variante == False:
-                #client = Client("AP123/SDXL-Lightning")
-                client = Client("ByteDance/SDXL-Lightning")
-                result = client.predict(
-                    input_data.input,
-                    "8-Step",
-                    api_name="/generate_image"
-                )
-                image_url = result
-            else:
-                #client = Client("https://playgroundai-playground-v2-5.hf.space/--replicas/9kuov/")
-                client = Client("https://choimirai-playground-v2-5.hf.space/--replicas/bgsav/")
-                result = client.predict(
-                    input_data.input,           # str in 'Prompt' Textbox component
-                    input_data.negativePrompt,  # str in 'Negative prompt' Textbox component
-                    True,   # bool in 'Use negative prompt' Checkbox component
-                    0,      # float (numeric value between 0 and 2147483647) in 'Seed' Slider component
-                    1024,   # float (numeric value between 256 and 1536) in 'Width' Slider component
-                    1024,   # float (numeric value between 256 and 1536) in 'Height' Slider component
-                    3,      # float (numeric value between 0.1 and 20) in 'Guidance Scale' Slider component
-                    True,   # bool in 'Randomize seed' Checkbox component
-                    api_name="/run"
-                )
-                image_url = result[0][0]['image']
-            print(image_url)
-            with open(image_url, 'rb') as img_file:
-                img_binary = img_file.read()
-                img_base64 = base64.b64encode(img_binary).decode('utf-8')
-            return {"response": img_base64}
-        except requests.exceptions.HTTPError as e:
-            time.sleep(1)
-            attempt += 1
-            if attempt < max_attempts:
-                continue
-            else:
-                return {"error": "Errore interno del server persistente!"}
-    return {"error": "Numero massimo di tentativi raggiunto"}
-
-#--------------------------------------------------- IMAGE TO TEXT ------------------------------------------------------
-class InputImageToText(BaseModel):
-    base64: str
-    input: str = ''
-
-def base64_in_immagine(dati_base64):
-    immagine = base64.b64decode(dati_base64)
-    immagine_pil = Image.open(BytesIO(immagine))
-    nome_file = "/tmp/img.jpg"
-    immagine_pil.save(nome_file)
-
-@app.post("/Image_To_Text")
-def image_to_text(request: Request, input_data: InputImageToText):
-    base64_in_immagine(input_data.base64)
-    if input_data.input == '':
-        input_data.input = 'Describe the image'
-    Version = 1
-    if Version == 1:
-        client = Client("https://vikhyatk-moondream1.hf.space/--replicas/av7ct/")
-        result = client.predict(
-            "/tmp/img.jpg",
-            input_data.input,
-            api_name="/answer_question"
-        )
-    return {"response": result}
-
-@app.post("/Spazio")
-def generate_spazio(request: Request, input_data: PostSpazio):
-    client = Client(input_data.nomeSpazio)
-    result = client.predict(
-        input_data.input,
-        api_name=input_data.api_name
-    )
-    return {"response": result}
+class Message(BaseModel):
+    role: str
+    content: str
+
+class ChatCompletionRequest(BaseModel):
+    model: str = "gemini-2.0-flash"
+    messages: List[Message]
+    max_tokens: Optional[int] = 8196
+    temperature: Optional[float] = 0.8
+    stream: Optional[bool] = False
+
+def verify_api_key(api_key: str = Depends(api_key_header)):
+    if not api_key:
+        raise HTTPException(status_code=403, detail="API key mancante")
+    if api_key != f"Bearer {EXPECTED_API_KEY}":
+        raise HTTPException(status_code=403, detail="API key non valida")
+    return api_key
+
+def get_openai_client():
+    api_key = random.choice(API_KEYS)
+    return OpenAI(api_key=api_key, base_url=BASE_URL)
+
+def call_api_sync(params: ChatCompletionRequest):
+    try:
+        client = get_openai_client()
+        print(params)
+        response = client.chat.completions.create(
+            model=params.model,
+            messages=[m.model_dump() for m in params.messages],
+            max_tokens=params.max_tokens,
+            temperature=params.temperature,
+            stream=params.stream
+        )
+        return response
+    except Exception as e:
+        if "429" in str(e):
+            time.sleep(2)
+            return call_api_sync(params)
+        else:
+            raise e
+
+async def _resp_async_generator(params: ChatCompletionRequest):
+    client = get_openai_client()
+    try:
+        response = client.chat.completions.create(
+            model=params.model,
+            messages=[m.model_dump() for m in params.messages],
+            max_tokens=params.max_tokens,
+            temperature=params.temperature,
+            stream=True
+        )
+        for chunk in response:
+            chunk_data = chunk.to_dict() if hasattr(chunk, "to_dict") else chunk
+            yield f"data: {json.dumps(chunk_data)}\n\n"
+            await asyncio.sleep(0.01)
+        yield "data: [DONE]\n\n"
+    except Exception as e:
+        error_data = {"error": str(e)}
+        yield f"data: {json.dumps(error_data)}\n\n"
+
+@app.get("/health")
+async def health_check():
+    return {"message": "success"}
+
+@app.post("/v1/chat/completions", dependencies=[Depends(verify_api_key)])
+async def chat_completions(req: ChatCompletionRequest):
+    if not req.messages:
+        raise HTTPException(status_code=400, detail="Nessun messaggio fornito")
+    if req.stream:
+        return StreamingResponse(
+            _resp_async_generator(req),
+            media_type="application/x-ndjson"
+        )
+    else:
+        try:
+            response = call_api_sync(req)
+            return response
+        except Exception as e:
+            raise HTTPException(status_code=500, detail=str(e))
 
 @app.get("/")
 def read_general():
-    return {"response": "Benvenuto
+    return {"response": "Benvenuto"}
+
+if __name__ == "__main__":
+    import uvicorn
+    uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True)
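
Since the commit replaces the Mixtral and gradio_client endpoints with a single OpenAI-compatible surface, any OpenAI SDK client can point at it. Below is a minimal sketch of a non-streaming call, assuming the server runs locally on port 8000 and EXPECTED_API_KEY is left at its "1234" default; the local URL is an assumption, while the key names come from the code above. The Gemini keys themselves (API_GEMINI_1..3) never leave the server.

import os
from openai import OpenAI

# The OpenAI SDK sends "Authorization: Bearer <api_key>", which is exactly
# what verify_api_key compares against f"Bearer {EXPECTED_API_KEY}".
client = OpenAI(
    api_key=os.getenv("EXPECTED_API_KEY", "1234"),
    base_url="http://localhost:8000/v1",  # assumed local deployment of this wrapper
)

resp = client.chat.completions.create(
    model="gemini-2.0-flash",
    messages=[{"role": "user", "content": "Ciao!"}],
)
print(resp.choices[0].message.content)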
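
When stream=True, chat_completions wraps _resp_async_generator in a StreamingResponse that emits SSE-style "data: <json>" lines and closes with "data: [DONE]", even though the declared media type is application/x-ndjson. A sketch of consuming that stream with plain requests follows (same assumed local deployment; the choices/delta layout is the standard OpenAI chunk shape, which this wrapper relays as-is):

import json
import requests

with requests.post(
    "http://localhost:8000/v1/chat/completions",
    headers={"Authorization": "Bearer 1234"},
    json={
        "model": "gemini-2.0-flash",
        "messages": [{"role": "user", "content": "Conta fino a 5"}],
        "stream": True,
    },
    stream=True,
) as resp:
    for line in resp.iter_lines(decode_unicode=True):
        # SSE events are separated by blank lines; skip anything that is
        # not a "data: ..." payload line.
        if not line or not line.startswith("data: "):
            continue
        payload = line[len("data: "):]
        if payload == "[DONE]":
            break
        chunk = json.loads(payload)
        if "error" in chunk:  # the generator emits {"error": ...} on failure
            raise RuntimeError(chunk["error"])
        choices = chunk.get("choices") or []
        if choices:
            delta = choices[0].get("delta") or {}
            print(delta.get("content") or "", end="", flush=True)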
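
Note that call_api_sync retries any error containing "429" by sleeping a fixed 2 seconds and recursing with no cap, so a persistent rate limit recurses indefinitely. The sketch below shows a bounded exponential-backoff variant of the same idea; it is an alternative, not the committed behaviour, and call_api_with_backoff, api_keys, and max_retries are names introduced here for illustration.

import random
import time
from openai import OpenAI

def call_api_with_backoff(params, api_keys, base_url, max_retries=5):
    """Bounded-backoff variant of call_api_sync (sketch, not committed code)."""
    delay = 2.0
    for attempt in range(max_retries):
        # same key-rotation idea as get_openai_client: pick a key at random
        client = OpenAI(api_key=random.choice(api_keys), base_url=base_url)
        try:
            return client.chat.completions.create(
                model=params.model,
                messages=[m.model_dump() for m in params.messages],
                max_tokens=params.max_tokens,
                temperature=params.temperature,
                stream=params.stream,
            )
        except Exception as e:
            # only retry rate limits, and stop after max_retries attempts
            if "429" in str(e) and attempt < max_retries - 1:
                time.sleep(delay)
                delay *= 2  # exponential backoff: 2 s, 4 s, 8 s, ...
            else:
                raise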
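
The rejection paths of verify_api_key can be exercised without ever reaching Gemini, since a missing or wrong Authorization header fails the dependency before any upstream call. A quick sanity-check sketch with FastAPI's TestClient, assuming this file is importable as main:

from fastapi.testclient import TestClient

from main import app  # assumes this file is importable as `main`

client = TestClient(app)

# the unauthenticated health probe is open
assert client.get("/health").json() == {"message": "success"}

body = {"messages": [{"role": "user", "content": "hi"}]}

# missing Authorization header -> 403 before any upstream call
assert client.post("/v1/chat/completions", json=body).status_code == 403

# wrong key -> 403 as well
r = client.post("/v1/chat/completions", json=body,
                headers={"Authorization": "Bearer wrong"})
assert r.status_code == 403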