Spaces:
Build error
Build error
Update app.py
Browse files
app.py
CHANGED
@@ -1,19 +1,29 @@
|
|
1 |
import gradio as gr
|
|
|
|
|
|
|
2 |
|
3 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
4 |
def infer_image(prompt, steps, cfg_scale, seed, width, height):
|
5 |
-
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
height=height
|
13 |
-
)
|
14 |
-
return result
|
15 |
|
16 |
-
# Interface
|
17 |
interface = gr.Interface(
|
18 |
fn=infer_image,
|
19 |
inputs=[
|
@@ -27,5 +37,5 @@ interface = gr.Interface(
|
|
27 |
outputs="image"
|
28 |
)
|
29 |
|
30 |
-
#
|
31 |
interface.launch()
|
|
|
1 |
import gradio as gr
|
2 |
+
import torch
|
3 |
+
from diffusers import StableDiffusionPipeline
|
4 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM
|
5 |
|
6 |
+
# Load the base diffusion model and apply LoRA weights on top of it.
def load_model_with_lora(base_model_path, lora_model_path):
    """Return a StableDiffusionPipeline with LoRA weights applied.

    Args:
        base_model_path: Hugging Face Hub id or local path of the base model.
        lora_model_path: Hub id or local path of the LoRA weights.

    Returns:
        The pipeline moved to the best available device, with LoRA loaded.
    """
    # float16 is only safe on CUDA; fall back to CPU/float32 so the app
    # still starts on GPU-less hosts (e.g. a free Space) instead of crashing
    # on the unconditional .to("cuda") the original code did.
    if torch.cuda.is_available():
        device, dtype = "cuda", torch.float16
    else:
        device, dtype = "cpu", torch.float32

    # Load the base model.
    pipeline = StableDiffusionPipeline.from_pretrained(
        base_model_path, torch_dtype=dtype
    ).to(device)

    # Load the LoRA and apply it to the base model.
    pipeline.load_lora_weights(lora_model_path)

    return pipeline
|
15 |
+
|
16 |
+
# Cache the pipeline at module level: the original code reloaded the entire
# multi-GB model on EVERY inference request, which is both extremely slow and
# memory-hungry. Lazy-load it once instead.
_PIPELINE = None


def _get_pipeline():
    """Lazily load and cache the LoRA-augmented pipeline (load once, reuse)."""
    global _PIPELINE
    if _PIPELINE is None:
        # NOTE(review): FLUX.1-dev normally requires diffusers' FluxPipeline,
        # not StableDiffusionPipeline — this mismatch is a likely cause of the
        # Space's build/runtime error; confirm the intended model/pipeline pair.
        _PIPELINE = load_model_with_lora(
            "black-forest-labs/FLUX.1-dev", "rorito/testSCG-Anatomy-Flux1"
        )
    return _PIPELINE


# Image inference with adjustable parameters.
def infer_image(prompt, steps, cfg_scale, seed, width, height):
    """Generate one image for *prompt* and return it (PIL image).

    Args:
        prompt: text prompt.
        steps: number of denoising steps.
        cfg_scale: classifier-free guidance scale.
        seed: RNG seed for reproducible output.
        width, height: output image size in pixels.
    """
    pipeline = _get_pipeline()

    # Use a dedicated Generator instead of torch.manual_seed so we do not
    # clobber the process-wide global RNG state on every request.
    generator = torch.Generator(device=pipeline.device).manual_seed(int(seed))

    # Gradio may deliver numeric widget values as floats; cast the
    # integer-valued parameters explicitly.
    result = pipeline(
        prompt,
        num_inference_steps=int(steps),
        guidance_scale=cfg_scale,
        width=int(width),
        height=int(height),
        generator=generator,
    )
    return result.images[0]
|
|
|
|
|
|
|
25 |
|
26 |
+
# Interface do Gradio com parâmetros ajustáveis
|
27 |
interface = gr.Interface(
|
28 |
fn=infer_image,
|
29 |
inputs=[
|
|
|
37 |
outputs="image"
|
38 |
)
|
39 |
|
40 |
+
# Launch the Gradio web app.
interface.launch()
|