vcollos committed
Commit 9eae8b5 · verified · 1 Parent(s): ed1cb43

Update app.py

Files changed (1):
  1. app.py +441 -633

app.py CHANGED
@@ -1,246 +1,31 @@
 import os
-import logging
-
-# Basic logging configuration
-logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
-logger = logging.getLogger(__name__)
-logger.info("Starting application")
-
-# Import the compatibility module before the problematic libraries
-try:
-    logger.info("Loading HF compatibility module")
-    import hf_compat
-except Exception as e:
-    logger.error(f"Error loading compatibility module: {e}")
-    raise
-
-# Now we can import the libraries normally
-import json
-import copy
 import time
 import random
 import numpy as np
-from typing import Any, Dict, List, Optional, Union
-import gradio as gr
 import torch
 from PIL import Image
-import spaces
 import io
-from supabase import create_client, Client
 from datetime import datetime
 
-# The problematic imports should work now
-from diffusers import DiffusionPipeline, AutoencoderTiny, AutoencoderKL, AutoPipelineForImage2Image
-from diffusers.utils import load_image
-from huggingface_hub import hf_hub_download, HfFileSystem, ModelCard, snapshot_download
-import requests
-from io import BytesIO
-import datetime
-
-logger.info("Libraries imported successfully")
-
-# Logging configuration to make diagnosis easier
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
 logger = logging.getLogger(__name__)
 
-from transformers import pipeline
-
-from diffusers import (
-    DiffusionPipeline,
-    AutoencoderTiny,
-    AutoencoderKL,
-    AutoPipelineForImage2Image,
-    FluxPipeline,
-    FlowMatchEulerDiscreteScheduler)
-
-logger.info("Loading transformers and diffusers...")
-
-from huggingface_hub import (
-    hf_hub_download,
-    HfFileSystem,
-    ModelCard,
-    snapshot_download)
-
-from diffusers.utils import load_image
-
-# Initialize Supabase
-url: str = os.getenv('SUPABASE_URL')
-key: str = os.getenv('SUPABASE_KEY')
-
-try:
-    if url and key:
-        supabase: Client = create_client(url, key)
-        logger.info("Supabase initialized successfully")
-    else:
-        logger.warning("SUPABASE_URL or SUPABASE_KEY environment variables not set")
-        supabase = None
-except Exception as e:
-    logger.error(f"Error initializing Supabase: {e}")
-    supabase = None
-
-from huggingface_hub import HfApi
-token = os.getenv("HF_TOKEN")
-
-def calculate_shift(
-    image_seq_len,
-    base_seq_len: int = 256,
-    max_seq_len: int = 4096,
-    base_shift: float = 0.5,
-    max_shift: float = 1.16,
-):
-    m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
-    b = base_shift - m * base_seq_len
-    mu = image_seq_len * m + b
-    return mu
-
-def retrieve_timesteps(
-    scheduler,
-    num_inference_steps: Optional[int] = None,
-    device: Optional[Union[str, torch.device]] = None,
-    timesteps: Optional[List[int]] = None,
-    sigmas: Optional[List[float]] = None,
-    **kwargs,
-):
-    if timesteps is not None and sigmas is not None:
-        raise ValueError("Only one of `timesteps` or `sigmas` may be passed. Please choose one to set custom values")
-
-    if timesteps is not None:
-        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
-        timesteps = scheduler.timesteps
-        num_inference_steps = len(timesteps)
-    elif sigmas is not None:
-        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
-        timesteps = scheduler.timesteps
-        num_inference_steps = len(timesteps)
-    else:
-        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
-        timesteps = scheduler.timesteps
-    return timesteps, num_inference_steps
-
-# FLUX pipeline
-@torch.inference_mode()
-def flux_pipe_call_that_returns_an_iterable_of_images(
-    self,
-    prompt: Union[str, List[str]] = None,
-    prompt_2: Optional[Union[str, List[str]]] = None,
-    height: Optional[int] = None,
-    width: Optional[int] = None,
-    num_inference_steps: int = 32,
-    timesteps: List[int] = None,
-    guidance_scale: float = 3.5,
-    num_images_per_prompt: Optional[int] = 2,
-    generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
-    latents: Optional[torch.FloatTensor] = None,
-    prompt_embeds: Optional[torch.FloatTensor] = None,
-    pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
-    output_type: Optional[str] = "pil",
-    return_dict: bool = True,
-    joint_attention_kwargs: Optional[Dict[str, Any]] = None,
-    max_sequence_length: int = 512,
-    good_vae: Optional[Any] = None,
-):
-    try:
-        height = height or self.default_sample_size * self.vae_scale_factor
-        width = width or self.default_sample_size * self.vae_scale_factor
-
-        self.check_inputs(
-            prompt,
-            prompt_2,
-            height,
-            width,
-            prompt_embeds=prompt_embeds,
-            pooled_prompt_embeds=pooled_prompt_embeds,
-            max_sequence_length=max_sequence_length,
-        )
-
-        self._guidance_scale = guidance_scale
-        self._joint_attention_kwargs = joint_attention_kwargs
-        self._interrupt = False
-
-        batch_size = 1 if isinstance(prompt, str) else len(prompt)
-        device = self._execution_device
-
-        lora_scale = joint_attention_kwargs.get("scale", None) if joint_attention_kwargs is not None else None
-        prompt_embeds, pooled_prompt_embeds, text_ids = self.encode_prompt(
-            prompt=prompt,
-            prompt_2=prompt_2,
-            prompt_embeds=prompt_embeds,
-            pooled_prompt_embeds=pooled_prompt_embeds,
-            device=device,
-            num_images_per_prompt=num_images_per_prompt,
-            max_sequence_length=max_sequence_length,
-            lora_scale=lora_scale,
-        )
-
-        num_channels_latents = self.transformer.config.in_channels // 4
-        latents, latent_image_ids = self.prepare_latents(
-            batch_size * num_images_per_prompt,
-            num_channels_latents,
-            height,
-            width,
-            prompt_embeds.dtype,
-            device,
-            generator,
-            latents,
-        )
-
-        sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps)
-        image_seq_len = latents.shape[1]
-        mu = calculate_shift(
-            image_seq_len,
-            self.scheduler.config.base_image_seq_len,
-            self.scheduler.config.max_image_seq_len,
-            self.scheduler.config.base_shift,
-            self.scheduler.config.max_shift,
-        )
-        timesteps, num_inference_steps = retrieve_timesteps(
-            self.scheduler,
-            num_inference_steps,
-            device,
-            timesteps,
-            sigmas,
-            mu=mu,
-        )
-        self._num_timesteps = len(timesteps)
-
-        guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32).expand(latents.shape[0]) if self.transformer.config.guidance_embeds else None
-
-        for i, t in enumerate(timesteps):
-            if self.interrupt:
-                continue
 
-            timestep = t.expand(latents.shape[0]).to(latents.dtype)
-
-            noise_pred = self.transformer(
-                hidden_states=latents,
-                timestep=timestep / 1000,
-                guidance=guidance,
-                pooled_projections=pooled_prompt_embeds,
-                encoder_hidden_states=prompt_embeds,
-                txt_ids=text_ids,
-                img_ids=latent_image_ids,
-                joint_attention_kwargs=self.joint_attention_kwargs,
-                return_dict=False,
-            )[0]
-
-            latents_for_image = self._unpack_latents(latents, height, width, self.vae_scale_factor)
-            latents_for_image = (latents_for_image / self.vae.config.scaling_factor) + self.vae.config.shift_factor
-            image = self.vae.decode(latents_for_image, return_dict=False)[0]
-            yield self.image_processor.postprocess(image, output_type=output_type)[0]
-            latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
-            torch.cuda.empty_cache()
-
-        latents = self._unpack_latents(latents, height, width, self.vae_scale_factor)
-        latents = (latents / good_vae.config.scaling_factor) + good_vae.config.shift_factor
-        image = good_vae.decode(latents, return_dict=False)[0]
-        self.maybe_free_model_hooks()
-        torch.cuda.empty_cache()
-        yield self.image_processor.postprocess(image, output_type=output_type)[0]
-    except Exception as e:
-        logger.error(f"Error in flux_pipe_call_that_returns_an_iterable_of_images: {e}")
-        raise e
 
-#----------------------------------------------------------------------------------------------------#
 loras = [
     # Super-Realism
     {
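Note on the hunk above: the deleted calculate_shift helper is a straight linear interpolation. It maps the latent sequence length onto the FlowMatch scheduler's shift parameter mu, hitting base_shift at base_seq_len and max_shift at max_seq_len. A standalone sanity check, assuming only the defaults visible in the removed code (256, 4096, 0.5, 1.16):

    def calculate_shift(image_seq_len, base_seq_len=256, max_seq_len=4096,
                        base_shift=0.5, max_shift=1.16):
        # Linear map: mu = m * image_seq_len + b through the two anchor points.
        m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
        b = base_shift - m * base_seq_len
        return image_seq_len * m + b

    print(calculate_shift(256))             # 0.5, the base shift
    print(calculate_shift(4096))            # 1.16, the max shift
    print(round(calculate_shift(2176), 2))  # 0.83, the midpoint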
@@ -254,7 +39,7 @@ loras = [
         "image": "https://huggingface.co/vcollos/camila/resolve/main/images/1732936378531__000003000_1.jpg",
         "title": "Camila",
         "repo": "vcollos/camila",
-        "weights": "Camila.safetensors",  # Fixed: removed the extra 's'
         "trigger_word": "A photo of Camila"
     },
     {
@@ -299,26 +84,37 @@ loras = [
         "weights": "lora.safetensors",
         "trigger_word": "A photo of Ditinha"
     }
-
-    # add new
 ]
 
-# Initialize the base model
-def initialize_models():
-    logger.info("Initializing models...")
-    use_auth_token = True
-    dtype = torch.bfloat16
-    # Check whether a GPU is available
-    device = "cuda" if torch.cuda.is_available() else "cpu"
-    logger.info(f"Using device: {device}")
-    base_model = "black-forest-labs/FLUX.1-dev"
-
     try:
         taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
-        good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to(device)
-        pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=taef1).to(device)
         pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
-            base_model,
             vae=good_vae,
             transformer=pipe.transformer,
             text_encoder=pipe.text_encoder,
@@ -326,66 +122,43 @@ def initialize_models():
             text_encoder_2=pipe.text_encoder_2,
             tokenizer_2=pipe.tokenizer_2,
             torch_dtype=dtype
-        )
 
-        # Monkeypatch the pipeline function
-        pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
 
-        logger.info("Models initialized successfully")
-        return pipe, pipe_i2i, good_vae, device
     except Exception as e:
-        logger.error(f"Error initializing models: {e}")
         raise e
 
-# Initialize the models
-pipe, pipe_i2i, good_vae, device = initialize_models()
-
-MAX_SEED = 2**32-1
-
-def upload_image_to_supabase(image, filename):
-    """Uploads the image to Supabase Storage and returns its public URL."""
-    if not supabase:
-        logger.warning("Supabase not initialized. The image will not be uploaded.")
-        return None
 
-    img_bytes = io.BytesIO()
-    image.save(img_bytes, format="PNG")
-    img_bytes.seek(0)  # Rewind to the start of the buffer
-
-    storage_path = f"images/{filename}"
-
-    try:
-        # Upload the image to Supabase
-        supabase.storage.from_("images").upload(storage_path, img_bytes.getvalue(), {"content-type": "image/png"})
-
-        # Return the image's public URL
-        base_url = f"{url}/storage/v1/object/public/images"
-        return f"{base_url}/{filename}"
-    except Exception as e:
-        logger.error(f"Error uploading image: {e}")
-        return None
-
-class calculateDuration:
-    def __init__(self, activity_name=""):
-        self.activity_name = activity_name
-
     def __enter__(self):
-        self.start_time = time.time()
         return self
-
-    def __exit__(self, exc_type, exc_value, traceback):
-        self.end_time = time.time()
-        self.elapsed_time = self.end_time - self.start_time
-        if self.activity_name:
-            logger.info(f"Time for {self.activity_name}: {self.elapsed_time:.6f} seconds")
-        else:
-            logger.info(f"Elapsed time: {self.elapsed_time:.6f} seconds")
 
 def update_selection(evt: gr.SelectData, width, height):
     selected_lora = loras[evt.index]
     new_placeholder = f"Type the prompt for {selected_lora['title']}, preferably in English."
     lora_repo = selected_lora["repo"]
     updated_text = f"### Selected: [{lora_repo}](https://huggingface.co/{lora_repo}) ✅"
     if "aspect" in selected_lora:
         if selected_lora["aspect"] == "retrato":
             width = 768
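Note on the hunk above: the deleted monkeypatch worked through Python's descriptor protocol. Calling `function.__get__(instance)` returns a bound method, so the standalone generator could be attached to one pipeline object without subclassing. A self-contained sketch of the same binding trick (illustrative names only, not from the commit):

    class Pipeline:
        pass

    def describe(self):
        # Once bound, `self` is supplied automatically on each call.
        return f"bound to {type(self).__name__}"

    pipe = Pipeline()
    pipe.describe = describe.__get__(pipe)  # bind the free function to `pipe`
    print(pipe.describe())  # -> bound to Pipeline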
@@ -396,6 +169,7 @@ def update_selection(evt: gr.SelectData, width, height):
     else:
         width = 1024
         height = 1024
     return (
         gr.update(placeholder=new_placeholder),
         updated_text,
@@ -404,384 +178,418 @@ def update_selection(evt: gr.SelectData, width, height):
         height,
     )
 
-@spaces.GPU(duration=60)  # Reduced from 100 to 60 for better resource management
-def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, progress):
-    try:
-        logger.info(f"Generating image with prompt: {prompt_mash[:50]}...")
-        pipe.to("cuda")
-        generator = torch.Generator(device="cuda").manual_seed(seed)
-        with calculateDuration("Generating image"):
-            # Generate image
-            for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
-                prompt=prompt_mash,
-                num_inference_steps=steps,
-                guidance_scale=cfg_scale,
-                width=width,
-                height=height,
-                generator=generator,
-                joint_attention_kwargs={"scale": lora_scale},
-                output_type="pil",
-                good_vae=good_vae,
-            ):
-                yield img
-    except Exception as e:
-        logger.error(f"Error generating image: {e}")
-        raise gr.Error(f"Error generating image: {str(e)}")
-
-@spaces.GPU(duration=60)  # Reduced for better resource management
-def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps, cfg_scale, width, height, lora_scale, seed):
-    try:
-        logger.info(f"Generating image-to-image with prompt: {prompt_mash[:50]}...")
-        generator = torch.Generator(device="cuda").manual_seed(seed)
-        pipe_i2i.to("cuda")
-        image_input = load_image(image_input_path)
-        final_image = pipe_i2i(
-            prompt=prompt_mash,
-            image=image_input,
-            strength=image_strength,
-            num_inference_steps=steps,
-            guidance_scale=cfg_scale,
-            width=width,
-            height=height,
-            generator=generator,
-            joint_attention_kwargs={"scale": lora_scale},
-            output_type="pil",
-        ).images[0]
-        return final_image
-    except Exception as e:
-        logger.error(f"Error generating image-to-image: {e}")
-        raise gr.Error(f"Error generating image-to-image: {str(e)}")
-
-def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
     try:
         if selected_index is None:
-            raise gr.Error("Select a model to continue.")
 
         selected_lora = loras[selected_index]
         lora_path = selected_lora["repo"]
-        trigger_word = selected_lora["trigger_word"]
         qualidade = "<flux.1-dev>"
 
-        logger.info(f"Using model: {lora_path}")
-
         if trigger_word:
-            if "trigger_position" in selected_lora:
-                if selected_lora["trigger_position"] == "prepend":
-                    prompt_mash = f"{trigger_word} {prompt} {qualidade}"
-                else:
-                    prompt_mash = f"{prompt} {trigger_word} {qualidade}"
             else:
-                prompt_mash = f"{trigger_word} {prompt} {qualidade}"
         else:
-            prompt_mash = prompt
-
-        with calculateDuration("Loading model"):
-            pipe.unload_lora_weights()
-            pipe_i2i.unload_lora_weights()
-            torch.cuda.empty_cache()  # Explicit memory cleanup
 
-        # LoRA weights flow
-        with calculateDuration(f"Loading model for {selected_lora['title']}"):
-            pipe_to_use = pipe_i2i if image_input is not None else pipe
             weight_name = selected_lora.get("weights", None)
 
             try:
-                pipe_to_use.load_lora_weights(
-                    lora_path,
-                    weight_name=weight_name,
                     low_cpu_mem_usage=True
                 )
-                logger.info(f"LoRA loaded successfully: {weight_name}")
             except Exception as e:
                 logger.error(f"Error loading LoRA: {e}")
-                raise gr.Error(f"Error loading the model: {str(e)}. Check the file name and path.")
-
-        with calculateDuration("Generating seeds"):
-            if randomize_seed:
-                seed = random.randint(0, MAX_SEED)
-
-        if image_input is not None:
-            final_image = generate_image_to_image(prompt_mash, image_input, image_strength, steps, cfg_scale, width, height, lora_scale, seed)
-            yield final_image, seed, gr.update(visible=False)
-        else:
-            image_generator = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, progress)
 
-            final_image = None
-            step_counter = 0
-            try:
-                for image in image_generator:
-                    step_counter += 1
-                    final_image = image
-                    progress_bar = f'<div class="progress-container"><div class="progress-bar" style="--current: {step_counter}; --total: {steps};"></div></div>'
-                    yield image, seed, gr.update(value=progress_bar, visible=True)
-            except Exception as e:
-                logger.error(f"Error during image generation: {e}")
-                raise gr.Error(f"Error during generation: {str(e)}")
 
-            yield final_image, seed, gr.update(value=progress_bar, visible=False)
-
-        # Save the image to Supabase
-        if final_image and supabase:
-            filename = f"image_{seed}_{datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S')}.png"
-            image_url = upload_image_to_supabase(final_image, filename)
-            if image_url:
-                logger.info(f"Image saved to Supabase: {image_url}")
-                # Save metadata to Supabase
-                try:
-                    response = supabase.table("images").insert({
-                        "prompt": prompt_mash,
-                        "cfg_scale": cfg_scale,
-                        "steps": steps,
-                        "seed": seed,
-                        "lora_scale": lora_scale,
-                        "image_url": image_url,
-                        "created_at": datetime.datetime.utcnow().isoformat()
-                    }).execute()
-
-                    if response.data:
-                        logger.info("Metadata saved to Supabase")
-                    else:
-                        logger.warning("Empty response from Supabase")
-                except Exception as e:
-                    logger.error(f"Error saving metadata to Supabase: {e}")
             else:
-                logger.warning("Image URL returned None")
     except Exception as e:
-        logger.error(f"Error in run_lora: {e}")
         raise gr.Error(f"Error: {str(e)}")
 
-def get_huggingface_safetensors(link):
     try:
-        split_link = link.split("/")
-        if(len(split_link) == 2):
-            model_card = ModelCard.load(link)
            base_model = model_card.data.get("base_model")
-            logger.info(f"Base model: {base_model}")
-
-            # Allows both
-            if((base_model != "black-forest-labs/FLUX.1-dev") and (base_model != "black-forest-labs/FLUX.1-schnell")):
-                raise Exception("Flux LoRA Not Found!")
-
-            # Only allow "black-forest-labs/FLUX.1-dev"
-            #if base_model != "black-forest-labs/FLUX.1-dev":
-                #raise Exception("Only FLUX.1-dev is supported, other LoRA models are not allowed!")
 
-            image_path = model_card.data.get("widget", [{}])[0].get("output", {}).get("url", None)
            trigger_word = model_card.data.get("instance_prompt", "")
-            image_url = f"https://huggingface.co/{link}/resolve/main/{image_path}" if image_path else None
-            fs = HfFileSystem()
-            safetensors_name = None
-            try:
-                list_of_files = fs.ls(link, detail=False)
-                for file in list_of_files:
-                    if(file.endswith(".safetensors")):
-                        safetensors_name = file.split("/")[-1]
-                    if (not image_url and file.lower().endswith((".jpg", ".jpeg", ".png", ".webp"))):
-                        image_elements = file.split("/")
-                        image_url = f"https://huggingface.co/{link}/resolve/main/{image_elements[-1]}"
-            except Exception as e:
-                logger.error(f"Error listing files: {e}")
-                raise Exception(f"You didn't include a link neither a valid Hugging Face repository with a *.safetensors LoRA")
-
-            if not safetensors_name:
-                raise Exception("No .safetensors file found in the repository")
-
-            return split_link[1], link, safetensors_name, trigger_word, image_url
-    except Exception as e:
-        logger.error(f"Error in get_huggingface_safetensors: {e}")
-        raise e
-
-def check_custom_model(link):
-    try:
-        if(link.startswith("https://")):
-            if(link.startswith("https://huggingface.co") or link.startswith("https://www.huggingface.co")):
-                link_split = link.split("huggingface.co/")
-                return get_huggingface_safetensors(link_split[1])
-        else:
-            return get_huggingface_safetensors(link)
-    except Exception as e:
-        logger.error(f"Error in check_custom_model: {e}")
-        raise e
-
-def add_custom_lora(custom_lora):
-    global loras
-    if custom_lora:
        try:
-            title, repo, path, trigger_word, image = check_custom_model(custom_lora)
-            logger.info(f"External model: {repo}")
-            card = f'''
-            <div class="custom_lora_card">
-                <span>Loaded custom LoRA:</span>
-                <div class="card_internal">
-                    <img src="{image}" />
-                    <div>
-                        <h3>{title}</h3>
-                        <small>{"Using: <code><b>"+trigger_word+"</code></b> as the trigger word" if trigger_word else "We couldn't find the trigger word; if there is one, add it to the prompt."}<br></small>
-                    </div>
-                </div>
-            </div>
-            '''
-            existing_item_index = next((index for (index, item) in enumerate(loras) if item['repo'] == repo), None)
-            if not existing_item_index:
-                new_item = {
-                    "image": image,
-                    "title": title,
-                    "repo": repo,
-                    "weights": path,
-                    "trigger_word": trigger_word,
-                }
-                logger.info(f"New item: {new_item}")
-                existing_item_index = len(loras)
-                loras.append(new_item)
-
-            return (
-                gr.update(visible=True, value=card),
-                gr.update(visible=True),
-                gr.Gallery(selected_index=None),
-                f"Custom: {path}",
-                existing_item_index,
-                trigger_word,
-            )
        except Exception as e:
-            error_msg = f"Invalid model: either the link is wrong or it is not a FLUX LoRA. Error: {str(e)}"
-            logger.error(error_msg)
-            gr.Warning(error_msg)
-            return (
-                gr.update(visible=True, value=f"Invalid model: either the link is wrong or it is not a FLUX LoRA"),
-                gr.update(visible=False),
-                gr.update(),
-                "",
-                None,
-                "",
-            )
-    else:
-        return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None, ""
 
 def remove_custom_lora():
-    return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None, ""
-
-# Remove this line, since spaces.GPU is already in use
-# run_lora.zerogpu = True
-
-collos = gr.themes.Soft(
-    primary_hue="gray",
-    secondary_hue="stone",
-    neutral_hue="slate",
-    radius_size=gr.themes.Size(lg="15px", md="8px", sm="6px", xl="16px", xs="4px", xxl="24px", xxs="2px")
-).set(
-    body_background_fill='*primary_100',
-    embed_radius='*radius_lg',
-    shadow_drop='0 1px 2px rgba(0, 0, 0, 0.1)',
-    shadow_drop_lg='0 1px 2px rgba(0, 0, 0, 0.1)',
-    shadow_inset='0 1px 2px rgba(0, 0, 0, 0.1)',
-    shadow_spread='0 1px 2px rgba(0, 0, 0, 0.1)',
-    shadow_spread_dark='0 1px 2px rgba(0, 0, 0, 0.1)',
-    block_radius='*radius_lg',
-    block_shadow='*shadow_drop',
-    container_radius='*radius_lg'
-)
-
-collos.css = """
-#group_with_padding {
-    padding: 20px;
-    background-color: #f5f5f5;
-    border: 1px solid #ccc;
-}
-
-#padded_text {
-    padding: 10px;
-    background-color: #eef;
-    border-radius: 5px;
-    font-size: 16px;
-}
-
-.progress-container {
-    width: 100%;
-    background-color: #f1f1f1;
-    border-radius: 5px;
-    margin: 10px 0;
-}
-
-.progress-bar {
-    width: calc(var(--current) / var(--total) * 100%);
-    height: 20px;
-    background-color: #4CAF50;
-    border-radius: 5px;
-    text-align: center;
-    line-height: 20px;
-    color: white;
-}
-"""
-
-with gr.Blocks(theme=collos, delete_cache=(60, 60)) as app:
-    title = gr.HTML(
-        """<img src="https://huggingface.co/spaces/vcollos/Uniodonto/resolve/main/logo/logo_collos_3.png" alt="Logo" style="display: block; margin: 0 auto; padding: 5px 0px 20px 0px; width: 200px;" />""",
-        elem_id="title",
     )
-    selected_index = gr.State(None)
-    with gr.Row():
-        with gr.Column(scale=3):
-            prompt = gr.Textbox(label="Prompt", lines=1, placeholder=":/ Select the model")
-        with gr.Column(scale=1):
-            generate_button = gr.Button("Generate Image", variant="primary", elem_id="cta")
-    with gr.Row():
-        with gr.Column():
-            selected_info = gr.Markdown("")
-            gallery = gr.Gallery(
-                label="Gallery",
-                value=[(item["image"], item["title"]) for item in loras],  # Passed as the named argument 'value'
-                allow_preview=False,
-                columns=3,
-                show_share_button=False
-            )
-            with gr.Group():
-                custom_lora = gr.Textbox(label="Select an External Model", placeholder="black-forest-labs/FLUX.1-dev")
-                gr.Markdown("[Check the list of models on Hugging Face](https://huggingface.co/models?other=base_model:adapter:black-forest-labs/FLUX.1-dev)", elem_id="lora_list")
-                custom_lora_info = gr.HTML(visible=False)
-                custom_lora_button = gr.Button("Remove External Model", visible=False)
         with gr.Column():
-            progress_bar = gr.Markdown(elem_id="progress", visible=False)
-            result = gr.Image(label="Generated Image")
 
-    with gr.Row():
-        with gr.Accordion("Advanced Settings", open=False):
             with gr.Row():
-                input_image = gr.Image(label="Insert an Image", type="filepath")
-                image_strength = gr.Slider(label="Denoising", info="Lower values mean more influence from the input image.", minimum=0.1, maximum=1.0, step=0.01, value=0.75)
-            with gr.Column():
-                with gr.Row():
-                    cfg_scale = gr.Slider(label="Guidance Scale", minimum=1, maximum=20, step=0.5, value=3.0)
-                    steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=32)
-
-                with gr.Row():
-                    width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=1024)
-                    height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=1024)
-
-                with gr.Row():
-                    randomize_seed = gr.Checkbox(True, label="Randomized Seed")
-                    seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
-                    lora_scale = gr.Slider(label="Model Scale", minimum=0, maximum=3, step=0.01, value=1.20)
-
-    gallery.select(
-        update_selection,
-        inputs=[width, height],
-        outputs=[prompt, selected_info, selected_index, width, height]
-    )
-    custom_lora.input(
-        add_custom_lora,
-        inputs=[custom_lora],
-        outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, prompt]
-    )
-    custom_lora_button.click(
-        remove_custom_lora,
-        outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, custom_lora]
-    )
-    gr.on(
-        triggers=[generate_button.click, prompt.submit],
-        fn=run_lora,
-        inputs=[prompt, input_image, image_strength, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale],
-        outputs=[result, seed, progress_bar]
-    )
 
-app.queue(concurrency_count=1)  # Limit concurrency to avoid memory problems
-app.launch()
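Note on the deleted side as a whole: the old run_lora flow streamed intermediate previews by yielding images from the handler. Gradio treats a generator event handler as a streaming output and pushes each yielded value to the bound component, which is what drove the step-by-step preview and the CSS progress bar above. A minimal, self-contained sketch of that pattern (illustrative only; queuing must be enabled for generator handlers):

    import time
    import gradio as gr

    def slow_steps(n):
        # Each yield immediately updates the bound output component.
        for i in range(int(n)):
            time.sleep(0.5)
            yield f"step {i + 1} of {int(n)}"

    with gr.Blocks() as demo:
        n = gr.Number(value=5, label="Steps")
        out = gr.Textbox(label="Progress")
        gr.Button("Run").click(slow_steps, inputs=n, outputs=out)

    demo.queue().launch()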
@@ -1,246 +1,31 @@
 import os
 import time
 import random
+import logging
+import traceback
 import numpy as np
 import torch
+import gradio as gr
 from PIL import Image
 import io
 from datetime import datetime
+from huggingface_hub import HfApi, HfFileSystem, snapshot_download, login
 
+# Configure logging to make debugging easier
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
 logger = logging.getLogger(__name__)
 
+# Authenticate with Hugging Face (required for some models)
+hf_token = os.environ.get("HF_TOKEN")
+if hf_token:
+    login(token=hf_token)
+    logger.info("Authenticated with Hugging Face")
+
+# Define the base model we are going to use
+BASE_MODEL = "black-forest-labs/FLUX.1-dev"
+MAX_SEED = 2**32-1
+
+# Load the list of available LoRA models
 loras = [
     # Super-Realism
     {
@@ -254,7 +39,7 @@ loras = [
         "image": "https://huggingface.co/vcollos/camila/resolve/main/images/1732936378531__000003000_1.jpg",
         "title": "Camila",
         "repo": "vcollos/camila",
+        "weights": "Camila.safetensors",  # Fixed
         "trigger_word": "A photo of Camila"
     },
     {
@@ -299,26 +84,37 @@ loras = [
         "weights": "lora.safetensors",
         "trigger_word": "A photo of Ditinha"
     }
 ]
 
+# Initialize the models - loaded only when needed, to save memory
+def load_models():
     try:
+        logger.info("Starting model loading...")
+        import torch
+        from diffusers import DiffusionPipeline, AutoencoderTiny, AutoencoderKL, AutoPipelineForImage2Image
+
+        # Configure device and data type
+        device = "cuda" if torch.cuda.is_available() else "cpu"
+        dtype = torch.bfloat16 if device == "cuda" else torch.float32
+
+        logger.info(f"Using device: {device} with dtype: {dtype}")
+
+        # Load the tiny autoencoder for previews
        taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
+
+        # Load the full VAE for the final image
+        good_vae = AutoencoderKL.from_pretrained(BASE_MODEL, subfolder="vae", torch_dtype=dtype).to(device)
+
+        # Load the main pipeline
+        pipe = DiffusionPipeline.from_pretrained(
+            BASE_MODEL,
+            torch_dtype=dtype,
+            vae=taef1
+        ).to(device)
+
+        # Create the image-to-image pipeline
        pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
+            BASE_MODEL,
            vae=good_vae,
            transformer=pipe.transformer,
            text_encoder=pipe.text_encoder,
@@ -326,66 +122,43 @@ def initialize_models():
            text_encoder_2=pipe.text_encoder_2,
            tokenizer_2=pipe.tokenizer_2,
            torch_dtype=dtype
+        ).to(device)
 
+        logger.info("Models loaded successfully")
 
+        return {
+            "pipe": pipe,
+            "pipe_i2i": pipe_i2i,
+            "good_vae": good_vae,
+            "device": device,
+            "dtype": dtype
+        }
     except Exception as e:
+        logger.error(f"Error loading models: {e}")
+        logger.error(traceback.format_exc())
        raise e
 
+# Class for measuring the duration of operations
+class TimeMeasure:
+    def __init__(self, name=""):
+        self.name = name
 
     def __enter__(self):
+        self.start = time.time()
        return self
+
+    def __exit__(self, *args):
+        self.duration = time.time() - self.start
+        logger.info(f"🕒 {self.name}: {self.duration:.2f} seconds")
 
+# Handle model selection in the interface
 def update_selection(evt: gr.SelectData, width, height):
    selected_lora = loras[evt.index]
    new_placeholder = f"Type the prompt for {selected_lora['title']}, preferably in English."
    lora_repo = selected_lora["repo"]
    updated_text = f"### Selected: [{lora_repo}](https://huggingface.co/{lora_repo}) ✅"
+
+    # Adjust dimensions according to the model's specifications
    if "aspect" in selected_lora:
        if selected_lora["aspect"] == "retrato":
            width = 768
@@ -396,6 +169,7 @@ def update_selection(evt: gr.SelectData, width, height):
    else:
        width = 1024
        height = 1024
+
    return (
        gr.update(placeholder=new_placeholder),
        updated_text,
@@ -404,384 +178,418 @@ def update_selection(evt: gr.SelectData, width, height):
        height,
    )
 
+# Main image generation function
+def generate_image(prompt, steps, cfg_scale, width, height, selected_index, randomize_seed, seed, lora_scale, input_image=None, image_strength=0.75, progress=gr.Progress()):
    try:
        if selected_index is None:
+            raise gr.Error("Please select a LoRA model first")
 
+        # Load the models (only when needed)
+        models = load_models()
+        pipe = models["pipe"]
+        pipe_i2i = models["pipe_i2i"]
+        good_vae = models["good_vae"]
+        device = models["device"]
+
+        # Build the prompt with trigger words
        selected_lora = loras[selected_index]
        lora_path = selected_lora["repo"]
+        trigger_word = selected_lora.get("trigger_word", "")
        qualidade = "<flux.1-dev>"
 
        if trigger_word:
+            trigger_position = selected_lora.get("trigger_position", "prepend")
+            if trigger_position == "prepend":
+                prompt_full = f"{trigger_word} {prompt} {qualidade}"
            else:
+                prompt_full = f"{prompt} {trigger_word} {qualidade}"
        else:
+            prompt_full = f"{prompt} {qualidade}"
 
+        logger.info(f"Full prompt: {prompt_full}")
+
+        # Randomize the seed if requested
+        if randomize_seed:
+            seed = random.randint(0, MAX_SEED)
+
+        # Set up the generator
+        generator = torch.Generator(device=device).manual_seed(seed)
+        progress(0, desc="Preparing model...")
+
+        # Unload previous LoRA weights and clear the cache
+        with TimeMeasure("Unloading previous models"):
+            pipe.unload_lora_weights() if hasattr(pipe, 'unload_lora_weights') else None
+            pipe_i2i.unload_lora_weights() if hasattr(pipe_i2i, 'unload_lora_weights') else None
+            torch.cuda.empty_cache()
+
+        # Load the LoRA weights
+        with TimeMeasure(f"Loading LoRA {selected_lora['title']}"):
+            pipeline_to_use = pipe_i2i if input_image is not None else pipe
            weight_name = selected_lora.get("weights", None)
 
            try:
+                pipeline_to_use.load_lora_weights(
+                    lora_path,
+                    weight_name=weight_name,
                    low_cpu_mem_usage=True
                )
+                logger.info(f"LoRA loaded: {weight_name}")
            except Exception as e:
                logger.error(f"Error loading LoRA: {e}")
+                raise gr.Error(f"Error loading LoRA: {str(e)}")
 
+        # Generate the image
+        with TimeMeasure("Generating image"):
+            progress(0.2, desc=f"Generating image with {steps} steps...")
+
+            if input_image is not None:
+                # Image-to-image mode
+                from diffusers.utils import load_image
+                image_input = load_image(input_image)
 
+                result = pipe_i2i(
+                    prompt=prompt_full,
+                    image=image_input,
+                    strength=image_strength,
+                    num_inference_steps=steps,
+                    guidance_scale=cfg_scale,
+                    width=width,
+                    height=height,
+                    generator=generator,
+                    joint_attention_kwargs={"scale": lora_scale},
+                )
+                final_image = result.images[0]
            else:
+                # Text-to-image mode with progressive preview
+                final_image = None
+
+                # Callback used to update the progress bar
+                def callback_fn(i, t, latents):
+                    progress((i + 1) / steps, desc=f"Step {i+1}/{steps}")
+                    return True
+
+                # Generate image
+                result = pipe(
+                    prompt=prompt_full,
+                    num_inference_steps=steps,
+                    guidance_scale=cfg_scale,
+                    width=width,
+                    height=height,
+                    generator=generator,
+                    joint_attention_kwargs={"scale": lora_scale},
+                    callback=callback_fn,
+                    callback_steps=1
+                )
+                final_image = result.images[0]
+
+        # Clear the cache after generation
+        torch.cuda.empty_cache()
+
+        return final_image, seed
+
    except Exception as e:
+        logger.error(f"Error generating image: {e}")
+        logger.error(traceback.format_exc())
        raise gr.Error(f"Error: {str(e)}")
 
+# Validate a custom model
+def add_custom_lora(custom_lora):
+    global loras
+
+    if not custom_lora:
+        return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None, ""
+
    try:
+        # Process the model link or ID
+        model_id = custom_lora
+        if model_id.startswith("https://huggingface.co/"):
+            model_id = model_id.replace("https://huggingface.co/", "")
+
+        logger.info(f"Checking model: {model_id}")
+
+        # Check whether it is a valid FLUX LoRA model
+        fs = HfFileSystem()
+
+        # Check the model card
+        try:
+            from huggingface_hub import ModelCard
+            model_card = ModelCard.load(model_id)
            base_model = model_card.data.get("base_model")
+
+            if base_model != "black-forest-labs/FLUX.1-dev" and base_model != "black-forest-labs/FLUX.1-schnell":
+                raise gr.Error("This model is not a FLUX LoRA")
 
+            title = model_id.split("/")[-1]
            trigger_word = model_card.data.get("instance_prompt", "")
+
+            # Find an example image
+            card_image = model_card.data.get("widget", [{}])[0].get("output", {}).get("url", "")
+            image_url = f"https://huggingface.co/{model_id}/resolve/main/{card_image}" if card_image else None
+
+        except Exception as e:
+            logger.warning(f"Error loading card: {e}, trying a fallback")
+            title = model_id.split("/")[-1]
+            trigger_word = ""
+            image_url = None
+
+        # Find the weights file and an image
+        weight_file = None
        try:
+            files = fs.ls(model_id, detail=False)
+
+            for file in files:
+                filename = file.split("/")[-1]
+
+                # Find the weights file
+                if filename.endswith(".safetensors"):
+                    weight_file = filename
+
+                # Find an image if none was found in the card
+                if not image_url and filename.lower().endswith((".jpg", ".jpeg", ".png", ".webp")):
+                    image_url = f"https://huggingface.co/{model_id}/resolve/main/{filename}"
+
        except Exception as e:
+            logger.error(f"Error listing files: {e}")
+            raise gr.Error(f"Could not access the repository: {str(e)}")
+
+        if not weight_file:
+            raise gr.Error("No .safetensors file found in the repository")
+
+        # If no image was found, use a placeholder
+        if not image_url:
+            image_url = "https://huggingface.co/front/assets/huggingface_logo-noborder.svg"
+
+        # Build the HTML card
+        card = f'''
+        <div class="custom_lora_card">
+            <span>Custom LoRA loaded successfully:</span>
+            <div class="card_internal">
+                <img src="{image_url}" style="max-width: 100px; max-height: 100px;"/>
+                <div>
+                    <h3>{title}</h3>
+                    <small>{"Using: <code><b>"+trigger_word+"</code></b> as the trigger word" if trigger_word else "We couldn't find the trigger word; if there is one, add it to the prompt."}<br></small>
+                </div>
+            </div>
+        </div>
+        '''
+
+        # Check whether it is already in the list
+        existing_item_index = next((index for (index, item) in enumerate(loras) if item['repo'] == model_id), None)
+
+        if existing_item_index is None:
+            new_item = {
+                "image": image_url,
+                "title": title,
+                "repo": model_id,
+                "weights": weight_file,
+                "trigger_word": trigger_word,
+            }
+
+            existing_item_index = len(loras)
+            loras.append(new_item)
+            logger.info(f"Added new model: {title}")
+
+        return (
+            gr.update(visible=True, value=card),
+            gr.update(visible=True),
+            gr.Gallery(value=[(item["image"], item["title"]) for item in loras]),
+            f"Model: {title}",
+            existing_item_index,
+            trigger_word if trigger_word else "",
+        )
+
+    except Exception as e:
+        logger.error(f"Error adding model: {e}")
+        error_msg = f"Invalid model: {str(e)}"
+        return (
+            gr.update(visible=True, value=error_msg),
+            gr.update(visible=False),
+            gr.update(),
+            "",
+            None,
+            "",
+        )
 
 def remove_custom_lora():
+    return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None, ""
+
+# Gradio interface
+def create_interface():
+    # Custom theme
+    collos = gr.themes.Soft(
+        primary_hue="gray",
+        secondary_hue="stone",
+        neutral_hue="slate",
+        radius_size=gr.themes.Size(lg="15px", md="8px", sm="6px", xl="16px", xs="4px", xxl="24px", xxs="2px")
+    ).set(
+        body_background_fill='*primary_100',
+        embed_radius='*radius_lg',
+        shadow_drop='0 1px 2px rgba(0, 0, 0, 0.1)',
+        shadow_drop_lg='0 1px 2px rgba(0, 0, 0, 0.1)',
+        shadow_inset='0 1px 2px rgba(0, 0, 0, 0.1)',
+        shadow_spread='0 1px 2px rgba(0, 0, 0, 0.1)',
+        shadow_spread_dark='0 1px 2px rgba(0, 0, 0, 0.1)',
+        block_radius='*radius_lg',
+        block_shadow='*shadow_drop',
+        container_radius='*radius_lg'
    )
 
+    # Custom CSS
+    css = """
+    #group_with_padding {
+        padding: 20px;
+        background-color: #f5f5f5;
+        border: 1px solid #ccc;
+    }
+
+    #padded_text {
+        padding: 10px;
+        background-color: #eef;
+        border-radius: 5px;
+        font-size: 16px;
+    }
+
+    .custom_lora_card {
+        padding: 10px;
+        background-color: #f5f5f5;
+        border-radius: 10px;
+        margin-top: 10px;
+    }
+
+    .card_internal {
+        display: flex;
+        align-items: center;
+        margin-top: 10px;
+    }
+
+    .card_internal img {
+        margin-right: 15px;
+        border-radius: 5px;
+    }
+    """
+
+    # Main interface
+    with gr.Blocks(theme=collos, css=css) as interface:
+        # Logo
+        title = gr.HTML(
+            """<img src="https://huggingface.co/spaces/vcollos/Uniodonto/resolve/main/logo/logo_collos_3.png" alt="Logo" style="display: block; margin: 0 auto; padding: 5px 0px 20px 0px; width: 200px;" />""",
+            elem_id="title",
+        )
+
+        # State storing the selected model index
+        selected_index = gr.State(None)
+
+        # Main section
+        with gr.Row():
+            with gr.Column(scale=3):
+                prompt = gr.Textbox(label="Prompt", lines=1, placeholder="Select a model first")
+            with gr.Column(scale=1):
+                generate_button = gr.Button("Generate Image", variant="primary", elem_id="cta")
+
+        # Gallery and results
+        with gr.Row():
            with gr.Column():
+                selected_info = gr.Markdown("")
+                gallery = gr.Gallery(
+                    label="Available Models",
+                    value=[(item["image"], item["title"]) for item in loras],
+                    allow_preview=False,
+                    columns=3,
+                    show_share_button=False
+                )
 
+                # Custom LoRA section
+                with gr.Group():
+                    custom_lora = gr.Textbox(
+                        label="Add External Model",
+                        placeholder="Model name or URL (e.g. vcollos/VitorCollos)"
+                    )
+                    gr.Markdown(
+                        "[Browse FLUX models on Hugging Face](https://huggingface.co/models?other=base_model:adapter:black-forest-labs/FLUX.1-dev)",
+                        elem_id="lora_list"
+                    )
+
+                # Custom model info
+                custom_lora_info = gr.HTML(visible=False)
+                custom_lora_button = gr.Button("Remove External Model", visible=False)
 
+            # Generation result
+            with gr.Column():
+                result = gr.Image(label="Generated Image", type="pil")
+                seed_output = gr.Number(label="Seed", precision=0)
+
+        # Advanced settings
+        with gr.Row():
+            with gr.Accordion("Advanced Settings", open=False):
                with gr.Row():
+                    input_image = gr.Image(label="Reference Image (optional)", type="filepath")
+                    image_strength = gr.Slider(
+                        label="Original Image Strength",
+                        info="Lower values preserve more of the original image",
+                        minimum=0.1,
+                        maximum=1.0,
+                        step=0.01,
+                        value=0.75
+                    )
+
+                with gr.Column():
+                    with gr.Row():
+                        cfg_scale = gr.Slider(label="Guidance Scale (CFG)", minimum=1, maximum=20, step=0.5, value=3.0)
+                        steps = gr.Slider(label="Inference Steps", minimum=1, maximum=50, step=1, value=32)
+
+                    with gr.Row():
+                        width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=1024)
+                        height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=1024)
+
+                    with gr.Row():
+                        randomize_seed = gr.Checkbox(True, label="Random Seed")
+                        seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
+                        lora_scale = gr.Slider(label="LoRA Strength", minimum=0, maximum=3, step=0.01, value=1.20)
+
+        # Events
+        gallery.select(
+            update_selection,
+            inputs=[width, height],
+            outputs=[prompt, selected_info, selected_index, width, height]
+        )
+
+        custom_lora.change(
+            add_custom_lora,
+            inputs=[custom_lora],
+            outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, prompt]
+        )
+
+        custom_lora_button.click(
+            remove_custom_lora,
+            outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, custom_lora]
+        )
+
+        generate_inputs = [
+            prompt, steps, cfg_scale, width, height, selected_index,
+            randomize_seed, seed, lora_scale, input_image, image_strength
+        ]
+
+        generate_outputs = [result, seed_output]
+
+        generate_button.click(generate_image, inputs=generate_inputs, outputs=generate_outputs)
+        prompt.submit(generate_image, inputs=generate_inputs, outputs=generate_outputs)
+
+        # Usage notes
+        gr.Markdown(
+            """
+            ## FLUX LoRA Image Generator
+
+            1. Select a model from the gallery
+            2. Type a prompt (preferably in English)
+            3. Click "Generate Image"
+
+            Use the advanced settings to adjust parameters such as size, number of steps, etc.
+            """
+        )
+
+    return interface
 
+# Create the interface and start the app
+if __name__ == "__main__":
+    app = create_interface()
+    app.queue(concurrency_count=1).launch(debug=True)
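Two closing notes on the added code, both version-dependent observations rather than confirmed defects. First, load_models() is called from inside generate_image(), so the pipelines are re-instantiated and moved to the GPU on every generation; a small module-level cache keeps the lazy-loading intent while paying that cost only once. A minimal sketch (the cache name is hypothetical, not from the commit):

    _models_cache = None

    def get_models():
        # Load once on first use, then reuse for later generations.
        global _models_cache
        if _models_cache is None:
            _models_cache = load_models()
        return _models_cache

Second, callback=/callback_steps= and queue(concurrency_count=1) follow older diffusers and Gradio 3.x conventions. Recent diffusers pipelines (FluxPipeline included) expose callback_on_step_end instead and may raise a TypeError on the old keywords, and Gradio 4.x renamed the queue argument. Assuming those newer versions, the equivalents would look roughly like:

    # Step callback in the newer diffusers style (signature per current docs);
    # `progress` and `steps` are the names used in generate_image above.
    def on_step_end(pipeline, step_index, timestep, callback_kwargs):
        progress((step_index + 1) / steps, desc=f"Step {step_index + 1}/{steps}")
        return callback_kwargs  # must return the kwargs dict

    result = pipe(prompt=prompt_full, num_inference_steps=steps,
                  callback_on_step_end=on_step_end)

    app.queue(default_concurrency_limit=1).launch(debug=True)  # Gradio 4.x spelling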