Update app.py
app.py CHANGED
@@ -15,22 +15,30 @@ pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.bfloat16)

 lora_repo = "kaytoo2022/cara-the-cavapoo-flux"
 trigger_word = "" # Leave trigger_word blank if not used.
-pipe.load_lora_weights(lora_repo)
+pipe.load_lora_weights(lora_repo, adapter_name='cara')

-#
-
+# ghibsky
+lora_repo_2 = "aleksa-codes/flux-ghibsky-illustration"
+pipe.load_lora_weights(lora_repo_2, adapter_name='lora_2')
+
+lora_repo_3 = "Datou1111/shou_xin"
+pipe.load_lora_weights(lora_repo_3, adapter_name='lora_3')
+
+pipe.set_adapters(["cara", "lora_2", "lora_3"], adapter_weights=[0.85, 0.0, 0.0])

 pipe.to("cuda")

 MAX_SEED = 2**32-1

 @spaces.GPU()
-def run_lora(prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
+def run_lora(prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale, lora_scale_2, lora_scale_3, progress=gr.Progress(track_tqdm=True)):
     # Set random seed for reproducibility
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device="cuda").manual_seed(seed)

+    pipe.set_adapters(["cara", "lora_2", "lora_3"], adapter_weights=[lora_scale, lora_scale_2, lora_scale_3])
+
     # Update progress bar (0% at start)
     progress(0, "Starting image generation...")

@@ -48,7 +56,7 @@ def run_lora(prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora
         width=width,
         height=height,
         generator=generator,
-        joint_attention_kwargs={"scale": lora_scale},
+        # joint_attention_kwargs={"scale": lora_scale},
     ).images[0]

     # Final update (100%)
@@ -57,19 +65,21 @@ def run_lora(prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora
     yield image, seed

 # Example cached image and settings
-example_image_path = "
-example_prompt = """
+example_image_path = "bella_space.jpeg" # Replace with the actual path to the example image
+example_prompt = """A portrait picture of b3lla dog in an astronaut outfit. Planets are visible in the background"""
 example_cfg_scale = 3.2
 example_steps = 32
 example_width = 1152
 example_height = 896
 example_seed = 3981632454
 example_lora_scale = 0.85
+example_lora_scale_2 = 0.0
+example_lora_scale_3 = 0.0

 def load_example():
     # Load example image from file
     example_image = Image.open(example_image_path)
-    return example_prompt, example_cfg_scale, example_steps, True, example_seed, example_width, example_height, example_lora_scale, example_image
+    return example_prompt, example_cfg_scale, example_steps, True, example_seed, example_width, example_height, example_lora_scale, example_lora_scale_2, example_lora_scale_3, example_image

 with gr.Blocks() as app:
     gr.Markdown("# Flux Lora Image Generator")
@@ -84,16 +94,18 @@ with gr.Blocks() as app:
             randomize_seed = gr.Checkbox(True, label="Randomize seed")
             seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=example_seed)
             lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=1, step=0.01, value=example_lora_scale)
+            lora_scale_2 = gr.Slider(label="LoRA Scale (GhibSky)", minimum=0, maximum=1, step=0.01, value=example_lora_scale_2)
+            lora_scale_3 = gr.Slider(label="LoRA Scale (Sketch)", minimum=0, maximum=1, step=0.01, value=example_lora_scale_3)
         with gr.Column(scale=1):
             result = gr.Image(label="Generated Image")
-            gr.Markdown("Generate images using
+            gr.Markdown("Generate images using Flux and a text prompt.\nUse `b3lla dog` in the prompt to trigger generating an image of Bella the dog.\n[[non-commercial license, Flux.1 Dev](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md)]")

     # Automatically load example data and image when the interface is launched
-    app.load(load_example, inputs=[], outputs=[prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale, result])
+    app.load(load_example, inputs=[], outputs=[prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale, lora_scale_2, lora_scale_3, result])

     generate_button.click(
         run_lora,
-        inputs=[prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale],
+        inputs=[prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale, lora_scale_2, lora_scale_3],
         outputs=[result, seed]
     )

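For reference, the core of this change is loading three LoRAs as named adapters and blending them with pipe.set_adapters, with the per-adapter weights now taken from the new Gradio sliders on every request. Below is a minimal sketch of that multi-adapter pattern outside of Gradio. It is an illustration only: it assumes a diffusers version with PEFT-backed adapter support and a CUDA GPU, and it assumes the base_model not shown in this diff is FLUX.1-dev (the license linked in the UI text suggests that); the repo names and example settings are the ones that appear in the diff.

    # Sketch: blend three LoRA adapters on a Flux pipeline.
    # Assumption: the base model is FLUX.1-dev; it is not visible in this diff.
    import torch
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained(
        "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
    ).to("cuda")

    # Load each LoRA under its own adapter name so they can be mixed later.
    pipe.load_lora_weights("kaytoo2022/cara-the-cavapoo-flux", adapter_name="cara")
    pipe.load_lora_weights("aleksa-codes/flux-ghibsky-illustration", adapter_name="lora_2")
    pipe.load_lora_weights("Datou1111/shou_xin", adapter_name="lora_3")

    # Per-request blending: a weight of 0.0 effectively disables an adapter,
    # which is what the Space's three sliders control.
    pipe.set_adapters(["cara", "lora_2", "lora_3"], adapter_weights=[0.85, 0.2, 0.0])

    image = pipe(
        "A portrait picture of b3lla dog in an astronaut outfit",
        guidance_scale=3.2,
        num_inference_steps=32,
        width=1152,
        height=896,
        generator=torch.Generator(device="cuda").manual_seed(3981632454),
    ).images[0]
    image.save("example.png")

This also explains why the diff comments out joint_attention_kwargs={"scale": lora_scale}: with set_adapters, the scaling appears to be handled per adapter instead of by a single global scale, and the second and third sliders default to 0.0 so only the "cara" adapter is active unless the user raises them.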