import spaces
import torch
from diffusers import FluxPipeline
import gradio as gr
import random
import numpy as np
import os

#from huggingface_hub import login


if torch.cuda.is_available():
    device = "cuda"
    print("Using GPU")
else:
    device = "cpu"
    print("Using CPU")


# Hugging Face token for the gated FLUX.1-dev checkpoint (login call below is currently disabled)
HF_TOKEN = os.getenv("HF_TOKEN")
#login(token=HF_TOKEN)


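# Upper bound for the seed slider; also used when drawing a random seed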
MAX_SEED = np.iinfo(np.int32).max
CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES", "0") == "1"

# Initialize the pipeline and download the model
pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
pipe.to(device)

# Enable memory optimizations: attention slicing trades a little speed for lower peak VRAM usage
pipe.enable_attention_slicing()


# Define the image generation function
@spaces.GPU(duration=180)  # request a ZeroGPU slot for up to 180 seconds per call
def generate_image(promptx, num_inference_steps, height, width, guidance_scale, seed, num_images_per_prompt, progress=gr.Progress(track_tqdm=True)):
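    """Generate one or more images from a text prompt with the FLUX.1-dev pipeline.

    A seed of 0 means "random": a fresh seed is drawn so repeated runs give different results.
    Returns a list of PIL images for display in the gallery.
    """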
    if seed == 0:
        seed = random.randint(1, MAX_SEED)

    generator = torch.Generator().manual_seed(seed)

    with torch.inference_mode():
        out = pipe(
            prompt=promptx,
            num_inference_steps=num_inference_steps,
            height=height,
            width=width,
            guidance_scale=guidance_scale,
            generator=generator,
            num_images_per_prompt=num_images_per_prompt
        ).images
    
    return out



# Create the Gradio interface

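# Example prompt for the gr.Examples block below (currently commented out)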
examples = [
    ["Full-body, realistic photo of a network engineer in a data center, conducting an experiment"]
]

css = '''
.gradio-container{max-width: 100% !important}
h1{text-align:center}
'''
with gr.Blocks(css=css) as fluxobj:
    with gr.Row():
        with gr.Column():
            gr.Markdown(
                """
                # FLUX.1-dev
                """
            )
            gr.Markdown(
                """
                Made by csit.udru.ac.th for non-commercial use.
                """
            )
    with gr.Group():
        with gr.Row():
            promptx = gr.Textbox(label="", show_label=False, info="", placeholder="Describe the image you want")
            run_button = gr.Button("Generate", scale=0)
        resultf = gr.Gallery(label="Generated AI Images", elem_id="gallery")
    with gr.Accordion("Advanced options", open=False):
        with gr.Row():
            num_inference_steps = gr.Slider(label="Number of Inference Steps", info="Number of denoising steps. More steps usually give a higher quality image at the cost of slower inference.", minimum=1, maximum=50, value=25, step=1)
            guidance_scale = gr.Slider(label="Guidance Scale", info="Controls how much the image generation process follows the text prompt. Higher values make the image stick more closely to the input text.", minimum=0.0, maximum=7.0, value=3.5, step=0.1)
        with gr.Row():
            width = gr.Slider(label="Width", info="Width of the Image", minimum=256, maximum=1024, step=32, value=1024)
            height = gr.Slider(label="Height", info="Height of the Image", minimum=256, maximum=1024, step=32, value=1024)
        with gr.Row():
            seed = gr.Slider(value=42, minimum=0, maximum=MAX_SEED, step=1, label="Seed", info="Starting point for the generation process; set to 0 for a random seed")
            num_images_per_prompt = gr.Slider(label="Images Per Prompt", info="Number of images to generate with the current settings", minimum=1, maximum=4, step=1, value=1)

    # gr.Examples(
    #     examples=examples,
    #     fn=generate_image,
    #     inputs=[promptx, num_inference_steps, height, width, guidance_scale, seed, num_images_per_prompt],
    #     outputs=[resultf],
    #     cache_examples=CACHE_EXAMPLES
    # )

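    # Run generation when the prompt is submitted (Enter) or the Generate button is clicked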
    gr.on(
        triggers=[
            promptx.submit,
            run_button.click,
        ],
        fn=generate_image,
        inputs=[promptx, num_inference_steps, height, width, guidance_scale, seed, num_images_per_prompt],
        outputs=[resultf],
    )
if __name__ == "__main__":
    fluxobj.queue(max_size=20).launch()