import gradio as gr
import spaces

import torch
from diffusers import FluxPipeline, FluxImg2ImgPipeline
from diffusers.utils import load_image
from huggingface_hub.utils import RepositoryNotFoundError

# bfloat16 is the dtype recommended for FLUX.1-dev; float16 can overflow and produce black images.
pipeline = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16).to("cuda")
# Build the img2img pipeline from the already-loaded components instead of loading a second full copy.
pipelineImg = FluxImg2ImgPipeline.from_pipe(pipeline)


@spaces.GPU(duration=70)
def generate(image, prompt, negative_prompt, width, height, sample_steps, lora_id):
    try:
        # Drop any LoRA left over from a previous request before loading the new one.
        pipelineImg.unload_lora_weights()
        pipelineImg.load_lora_weights(lora_id)
    except RepositoryNotFoundError:
        raise ValueError(f"Received an invalid FLUX LoRA repository: {lora_id}")

    # Resize the input to the requested output size; FLUX has no native negative prompt,
    # so the exclusion terms are appended to the prompt text instead.
    init_image = load_image(image).resize((int(width), int(height)))
    return pipelineImg(prompt=f"{prompt}\nDO NOT INCLUDE {negative_prompt}", image=init_image, width=width, height=height, num_inference_steps=sample_steps, generator=torch.Generator("cpu").manual_seed(42), guidance_scale=7).images[0]

with gr.Blocks() as interface:
    with gr.Column():
        with gr.Row():
            with gr.Column():
                image = gr.Image(label="Input image", show_label=False, type="filepath")
                prompt = gr.Textbox(label="Prompt", info="What do you want?", value="Keanu Reeves holding a neon sign reading 'Hello, world!', 32k HDR, paparazzi", lines=4, interactive=True)
                negative_prompt = gr.Textbox(label="Negative Prompt", info="What do you want to exclude from the image?", value="ugly, low quality", lines=4, interactive=True)
            with gr.Column():
                generate_button = gr.Button("Generate")
                output = gr.Image()
        with gr.Row():
            with gr.Accordion(label="Advanced Settings", open=False):
                with gr.Row():
                    with gr.Column():
                        width = gr.Slider(label="Width", info="The width in pixels of the generated image.", value=512, minimum=128, maximum=4096, step=64, interactive=True)
                        height = gr.Slider(label="Height", info="The height in pixels of the generated image.", value=512, minimum=128, maximum=4096, step=64, interactive=True)
                    with gr.Column():
                        sampling_steps = gr.Slider(label="Sampling Steps", info="The number of denoising steps.", value=20, minimum=4, maximum=50, step=1, interactive=True)
                        lora_id = gr.Textbox(label="Adapter Repository", info="ID of the FLUX LoRA", value="pepper13/fluxfw")

    generate_button.click(fn=generate, inputs=[image, prompt, negative_prompt, width, height, sampling_steps, lora_id], outputs=[output])

if __name__ == "__main__":
    interface.launch()